/*	$NetBSD: if_vr.c,v 1.30 1999/12/12 02:56:49 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with strict alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.  (The
 * 2-byte shift makes up for the 14-byte Ethernet header, leaving the
 * IP header longword aligned.)
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <vm/vm.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if defined(INET)
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
	  "VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
	  "VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
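
/*
 * Both ring sizes must be powers of two for the masking in
 * VR_NEXTTX()/VR_NEXTRX() to wrap correctly; e.g. with VR_NTXDESC
 * at 64, VR_NEXTTX(63) == (64 & 63) == 0.
 */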

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since outgoing packets always end up in a single, longword
 * aligned buffer (copied into one if necessary), a single transmit
 * descriptor per packet suffices.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])
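
/*
 * For example, assuming the four-longword (16-byte) vr_desc layout from
 * if_vrreg.h, VR_CDRXOFF(0) is 64 * 16 == 1024: the receive descriptors
 * simply follow the transmit descriptors within the one DMA-contiguous
 * vr_control_data clump, and these byte offsets are later added to the
 * clump's bus address by VR_CDTXADDR()/VR_CDRXADDR() below.
 */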

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf *ds_mbuf;		/* head of mbuf chain */
	bus_dmamap_t ds_dmamap;		/* our DMA map */
};

struct vr_softc {
	struct device vr_dev;		/* generic device glue */
	void *vr_ih;			/* interrupt cookie */
	void *vr_ats;			/* shutdown hook */
	bus_space_tag_t vr_bst;		/* bus space tag */
	bus_space_handle_t vr_bsh;	/* bus space handle */
	bus_dma_tag_t vr_dmat;		/* bus DMA tag */
	pci_chipset_tag_t vr_pc;	/* PCI chipset info */
	struct ethercom vr_ec;		/* Ethernet common info */
	u_int8_t vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data vr_mii;		/* MII/media info */

	bus_dmamap_t vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft vr_txsoft[VR_NTXDESC];
	struct vr_descsoft vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data *vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */
};

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))
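
/*
 * Note that these sync only the single descriptor being touched (offset
 * VR_CDTXOFF(x)/VR_CDRXOFF(x), length sizeof(struct vr_desc)) rather
 * than the entire control-data map, which keeps the cost of the very
 * frequent descriptor handoffs down on non-coherent platforms.
 */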

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
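
/*
 * The power-of-two assumption matters in the VR_RXCTL_BUFLEN
 * computation above: with MCLBYTES == 2048, (MCLBYTES - 1) == 0x7ff,
 * exactly the largest value the 11-bit buffer length field can hold.
 * If MCLBYTES were not a power of two, the subtraction would not yield
 * an all-ones bit pattern and the mask could silently truncate the
 * buffer length.
 */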

/*
 * Register space access macros.
 */
#define	CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT	1000
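
/*
 * VR_TIMEOUT is an iteration count, not a time: paired with DELAY(10)
 * in vr_reset() below, it gives the chip roughly 1000 * 10us == 10ms
 * to clear VR_CMD_RESET before we give up.
 */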

static int	vr_add_rxbuf __P((struct vr_softc *, int));

static void	vr_rxeof __P((struct vr_softc *));
static void	vr_rxeoc __P((struct vr_softc *));
static void	vr_txeof __P((struct vr_softc *));
static int	vr_intr __P((void *));
static void	vr_start __P((struct ifnet *));
static int	vr_ioctl __P((struct ifnet *, u_long, caddr_t));
static int	vr_init __P((struct vr_softc *));
static void	vr_stop __P((struct vr_softc *, int));
static void	vr_rxdrain __P((struct vr_softc *));
static void	vr_watchdog __P((struct ifnet *));
static void	vr_tick __P((void *));

static int	vr_ifmedia_upd __P((struct ifnet *));
static void	vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

static int	vr_mii_readreg __P((struct device *, int, int));
static void	vr_mii_writereg __P((struct device *, int, int, int));
static void	vr_mii_statchg __P((struct device *));

static u_int8_t	vr_calchash __P((u_int8_t *));
static void	vr_setmulti __P((struct vr_softc *));
static void	vr_reset __P((struct vr_softc *));

int	vr_copy_small = 0;

/*
 * Note the argument is parenthesized in the expansions so that
 * compound bit expressions (e.g. A|B) clear correctly under the `~'.
 */
#define	VR_SETBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) | (x))

#define	VR_CLRBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) & ~(x))

#define	VR_SETBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) | (x))

#define	VR_CLRBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) & ~(x))

#define	VR_SETBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) | (x))

#define	VR_CLRBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) & ~(x))

/*
 * MII bit-bang glue.
 */
u_int32_t	vr_mii_bitbang_read __P((struct device *));
void		vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
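
/*
 * With this table, the machine-independent mii_bitbang code can clock
 * MII frames through the Rhine's VR_MIICMD register: the common
 * mii_bitbang_readreg()/mii_bitbang_writereg() routines repeatedly call
 * our read/write hooks below, raising and lowering VR_MIICMD_CLK while
 * shifting address and data bits out via VR_MIICMD_DATAOUT and sampling
 * replies via VR_MIICMD_DATAIN.
 */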

u_int32_t
vr_mii_bitbang_read(self)
	struct device *self;
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

void
vr_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	/* XXX Update ifp->if_baudrate */
}

/*
 * Calculate the CRC of a multicast group address and return the upper
 * 6 bits, which select a position in the 64-bit hash filter.
 */
static u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return ((crc >> 26) & 0x0000003F);
}
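
/*
 * This is the standard bit-serial Ethernet CRC-32 (polynomial
 * 0x04c11db7) restricted to the 6-byte address.  The expression
 * (crc ^ 0x04c11db6) | carry is equivalent to crc ^ 0x04c11db7 when
 * carry is set, since the just-shifted crc always has a zero low bit.
 * vr_setmulti() below uses the returned value to pick one of the 64
 * bit positions in the VR_MAR0/VR_MAR1 hash filter.
 */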

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * A range of multicast addresses can't be
			 * represented in the hash filter; skip it.
			 * Note we must advance the cursor before the
			 * `continue', or this loop would never
			 * terminate.
			 */
			ETHER_NEXT_MULTI(step, enm);
			continue;
		}

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n",
		    sc->vr_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet,
		 * but don't pass it up to the ether_input() layer unless
		 * it's a broadcast packet, a multicast packet, addressed
		 * to our ethernet address, or the interface is in
		 * promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
			    ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
			    memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl),
			      ETHER_ADDR_LEN) != 0) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}
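
/*
 * RX end-of-channel: the receiver stopped because of an RX error or a
 * missing buffer.  Reap any frames already completed, re-point the chip
 * at the next clean descriptor, and turn the receiver back on.
 */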
static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	     i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(sc, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine.  Packets are DMA-mapped directly out of their
 * mbufs when they happen to be longword aligned and fit in a single
 * DMA segment; otherwise they are first copied into a fresh, aligned
 * mbuf, since the Rhine requires aligned, single-fragment transmit
 * buffers.  We also keep a software pointer to each mbuf, since the
 * transmit list fragment pointers are physical addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 */
		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname,
				    error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.  The Rhine
		 * doesn't auto-pad, so we have to do this ourselves.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
		d->vr_ctl |=
		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
		    VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(sc, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	timeout(vr_tick, sc, hz);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = vr_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			error = vr_init(sc);
			break;
		}
		break;

	case SIOCGIFADDR:
		bcopy((caddr_t) sc->vr_enaddr,
		    (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			vr_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = vr_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = vr_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (command == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->vr_ec);
		else
			error = ether_delmulti(ifr, &sc->vr_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media,
		    command);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(sc);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	timeout(vr_tick, sc, hz);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(sc, drain)
	struct vr_softc *sc;
	int drain;
{
	struct vr_descsoft *ds;
	struct ifnet *ifp;
	int i;

	/* Cancel one second timer. */
	untimeout(vr_tick, sc);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (drain) {
		/*
		 * Release the receive buffers.
		 */
		vr_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type	*vr_lookup __P((struct pci_attach_args *));
static int	vr_probe __P((struct device *, struct cfdata *, void *));
static void	vr_attach __P((struct device *, struct device *, void *));
static void	vr_shutdown __P((void *));

struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(sc, 1);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n",
			    sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt. */
		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
		    pa->pa_intrline, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
			    sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
		    sc->vr_dev.dv_xname, intrstr);
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_baudrate = 10000000;
	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

#if NBPFILTER > 0
	bpfattach(&sc->vr_ec.ec_if.if_bpf,
	    ifp, DLT_EN10MB, sizeof (struct ether_header));
#endif

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
		    sc->vr_dev.dv_xname);
	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}