/*	$NetBSD: if_vr.c,v 1.50 2001/07/19 16:36:15 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism has the same flaw.  This
 * means that on systems with strict alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
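
/*
 * Example of the receive-side fixup: a frame DMA'd to a longword-aligned
 * buffer starts with the 14-byte Ethernet header, which leaves the IP
 * header at offset 14, i.e. only 2-byte aligned.  Copying the frame to
 * m_data + 2 (done in vr_rxeof() below) moves the payload to offset 16,
 * restoring 4-byte alignment on strict-alignment machines.
 */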

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
	  "VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
	  "VIA VT6102 (Rhine II) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
	  "VIA VT86C100A (Rhine II) 10/100" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define VR_NTXDESC		64
#define VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define VR_NRXDESC		64
#define VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
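
/*
 * Both ring sizes are powers of two, so the VR_NEXTTX()/VR_NEXTRX()
 * macros can wrap an index around the ring with a simple mask instead
 * of a modulo or a compare-and-reset.
 */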

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	struct callout		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;	/* number of TX requests pending */
	int	vr_txdirty;	/* first dirty TX descriptor */
	int	vr_txlast;	/* last used TX descriptor */

	int	vr_rxptr;	/* next ready RX descriptor */
};

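/*
 * The control data lives in one physically contiguous clump, so the DMA
 * address of descriptor x is simply the clump's base address (vr_cddma)
 * plus the structure offset computed by VR_CDTXOFF()/VR_CDRXOFF().
 */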
#define VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
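/*
 * VR_INIT_RXDESC() resets descriptor i to cover its mbuf cluster, chains
 * it to the next descriptor in the ring, and hands it to the chip by
 * setting VR_RXSTAT_OWN; the trailing sync pushes the update out to
 * memory before the Rhine can look at it.
 */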
#define VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

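/*
 * Number of times vr_reset() polls for reset completion, with a 10us
 * delay per poll -- about 10ms worst case.
 */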
#define VR_TIMEOUT	1000

static int	vr_add_rxbuf __P((struct vr_softc *, int));

static void	vr_rxeof __P((struct vr_softc *));
static void	vr_rxeoc __P((struct vr_softc *));
static void	vr_txeof __P((struct vr_softc *));
static int	vr_intr __P((void *));
static void	vr_start __P((struct ifnet *));
static int	vr_ioctl __P((struct ifnet *, u_long, caddr_t));
static int	vr_init __P((struct ifnet *));
static void	vr_stop __P((struct ifnet *, int));
static void	vr_rxdrain __P((struct vr_softc *));
static void	vr_watchdog __P((struct ifnet *));
static void	vr_tick __P((void *));

static int	vr_ifmedia_upd __P((struct ifnet *));
static void	vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

static int	vr_mii_readreg __P((struct device *, int, int));
static void	vr_mii_writereg __P((struct device *, int, int, int));
static void	vr_mii_statchg __P((struct device *));

static void	vr_setmulti __P((struct vr_softc *));
static void	vr_reset __P((struct vr_softc *));

int	vr_copy_small = 0;

#define VR_SETBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) & ~x)

/*
 * MII bit-bang glue.
 */
u_int32_t vr_mii_bitbang_read __P((struct device *));
void	vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
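
/*
 * mii_bitbang_readreg()/mii_bitbang_writereg() use the ops table above
 * to clock MII frames one bit at a time through the named bits of a
 * single register -- VR_MIICMD in our case, accessed by the two
 * routines below.
 */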

u_int32_t
vr_mii_bitbang_read(self)
	struct device *self;
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

void
vr_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

#define vr_calchash(addr)	\
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
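
/*
 * Example: ether_crc32_be() yields a 32-bit CRC of the 6-byte address;
 * shifting right by 26 keeps the top 6 bits, a value in 0..63.  Values
 * 0-31 select a bit in VR_MAR0, values 32-63 a bit in VR_MAR1, as
 * vr_setmulti() does below.
 */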

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n",
		    sc->vr_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		/*
		 * The Rhine chip includes the FCS with every
		 * received packet.
		 */
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

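/*
 * Receiver error recovery: reap any frames that did complete, then stop
 * the receiver, repoint it at the next ready descriptor, and start it
 * again.
 */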
void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	    i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

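		/*
		 * Accumulate this frame's collision count; the
		 * VR_TXSTAT_COLLCNT field evidently starts at bit 3
		 * of the status word, hence the shift down by 3.
		 */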
		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put
 * pointers to the mbuf data regions directly in the transmit lists.
 * We also save a copy of the pointers, since the transmit list
 * fragment pointers are physical addresses.  Packets that are
 * misaligned or that don't fit in a single DMA segment are first
 * copied into a fresh, aligned mbuf.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 */
		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname,
				    error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.  The Rhine
		 * doesn't auto-pad, so we have to do this ourselves.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
		d->vr_ctl |=
		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
		    VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(ifp, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media,
		    command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(ifp);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_descsoft *ds;
	int i;

	/* Cancel one second timer. */
	callout_stop(&sc->vr_tick_ch);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		vr_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type	*vr_lookup __P((struct pci_attach_args *));
static int	vr_probe __P((struct device *, struct cfdata *, void *));
static void	vr_attach __P((struct device *, struct device *, void *));
static void	vr_shutdown __P((void *));

struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(&sc->vr_ec.ec_if, 1);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	callout_init(&sc->vr_tick_ch);

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n",
			    sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
			    sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
		    sc->vr_dev.dv_xname, intrstr);
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
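	/*
	 * This is the usual four-step bus_dma setup: bus_dmamem_alloc()
	 * gets DMA-safe memory, bus_dmamem_map() makes it kernel-visible,
	 * bus_dmamap_create() builds a map, and bus_dmamap_load() obtains
	 * the device-visible address that ends up in vr_cddma.
	 */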
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_stop = vr_stop;
	IFQ_SET_READY(&ifp->if_snd);

	strcpy(ifp->if_xname, sc->vr_dev.dv_xname);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd,
	    vr_ifmedia_sts);
	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
		    sc->vr_dev.dv_xname);
	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}