/*	$NetBSD: if_vr.c,v 1.10 1999/02/05 01:17:24 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
35 */
36
37 /*
38 * VIA Rhine fast ethernet PCI NIC driver
39 *
40 * Supports various network adapters based on the VIA Rhine
41 * and Rhine II PCI controllers, including the D-Link DFE530TX.
42 * Datasheets are available at http://www.via.com.tw.
43 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
52 * uses an MII bus and an external physical layer interface. The
53 * receiver has a one entry perfect filter and a 64-bit hash table
54 * multicast filter. Transmit and receive descriptors are similar
55 * to the tulip.
56 *
57 * The Rhine has a serious flaw in its transmit DMA mechanism:
58 * transmit buffers must be longword aligned. Unfortunately,
59 * FreeBSD doesn't guarantee that mbufs will be filled in starting
60 * at longword boundaries, so we have to do a buffer copy before
61 * transmission.
62 */
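
/*
 * (The copy is done in vr_encap() below, which also pads short frames
 * out to VR_MIN_FRAMELEN, since the chip does not auto-pad them.)
 */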
63
64 #include "opt_inet.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/socket.h>
73 #include <sys/device.h>
74
75 #include <net/if.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/if_media.h>
79 #include <net/if_ether.h>
80
81 #if defined(INET)
82 #include <netinet/in.h>
83 #include <netinet/if_inarp.h>
84 #endif
85
86 #include "bpfilter.h"
87 #if NBPFILTER > 0
88 #include <net/bpf.h>
89 #endif
90
91 #include <vm/vm.h> /* for vtophys */
92
93 #include <machine/bus.h>
94 #include <machine/intr.h>
95
96 #include <dev/mii/mii.h>
97
98 #include <dev/pci/pcireg.h>
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcidevs.h>
101
102 #include <dev/pci/if_vrreg.h>
103
104 #if defined(__NetBSD__) && defined(__alpha__)
105 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
106 #undef vtophys
107 #define vtophys(va) alpha_XXX_dmamap((vaddr_t)(va))
108 #endif
109
110 #define VR_USEIOSPACE
111
112 /* #define VR_BACKGROUND_AUTONEG */
113
114 #define ETHER_CRC_LEN 4 /* XXX Should be in a common header. */
115
116 /*
117 * Various supported device vendors/types and their names.
118 */
119 static struct vr_type {
120 pci_vendor_id_t vr_vid;
121 pci_product_id_t vr_did;
122 const char *vr_name;
123 } vr_devs[] = {
124 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
125 "VIA VT3043 Rhine I 10/100BaseTX" },
126 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
127 "VIA VT86C100A Rhine II 10/100BaseTX" },
128 { 0, 0, NULL }
129 };
130
131 /*
132 * Various supported PHY vendors/types and their names. Note that
133 * this driver will work with pretty much any MII-compliant PHY,
134 * so failure to positively identify the chip is not a fatal error.
135 */
136
137 static struct vr_type vr_phys[] = {
138 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
139 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
140 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
141 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
142 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
143 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
144 { 0, 0, "<MII-compliant physical interface>" }
145 };
146
147 struct vr_mii_frame {
148 u_int8_t mii_stdelim;
149 u_int8_t mii_opcode;
150 u_int8_t mii_phyaddr;
151 u_int8_t mii_regaddr;
152 u_int8_t mii_turnaround;
153 u_int16_t mii_data;
154 };
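
/*
 * A management frame on the MII wire consists of a 32-bit preamble
 * (all ones, generated by vr_mii_sync()) followed by a 2-bit start
 * delimiter, a 2-bit opcode, the 5-bit PHY address, the 5-bit register
 * address, a 2-bit turnaround and 16 bits of data.  vr_mii_writereg()
 * shifts all of these fields out; vr_mii_readreg() sends everything up
 * to the register address and then samples the turnaround/ack and data
 * bits driven back by the PHY.
 */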
155
156 #define VR_FLAG_FORCEDELAY 1
157 #define VR_FLAG_SCHEDDELAY 2
158 #define VR_FLAG_DELAYTIMEO 3
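
/*
 * The flags above select how vr_autoneg_mii() is invoked:
 * VR_FLAG_FORCEDELAY spins for the whole negotiation (only safe during
 * attach), VR_FLAG_SCHEDDELAY starts negotiation and arms the interface
 * watchdog timer to collect the result, and VR_FLAG_DELAYTIMEO is what
 * vr_watchdog() passes in when that timer expires.
 */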
159
160 struct vr_list_data {
161 struct vr_desc vr_rx_list[VR_RX_LIST_CNT];
162 struct vr_desc vr_tx_list[VR_TX_LIST_CNT];
163 };
164
165 struct vr_chain {
166 struct vr_desc *vr_ptr;
167 struct mbuf *vr_mbuf;
168 struct vr_chain *vr_nextdesc;
169 };
170
171 struct vr_chain_onefrag {
172 struct vr_desc *vr_ptr;
173 struct mbuf *vr_mbuf;
174 struct vr_chain_onefrag *vr_nextdesc;
175 };
176
177 struct vr_chain_data {
178 struct vr_chain_onefrag vr_rx_chain[VR_RX_LIST_CNT];
179 struct vr_chain vr_tx_chain[VR_TX_LIST_CNT];
180
181 struct vr_chain_onefrag *vr_rx_head;
182
183 struct vr_chain *vr_tx_head;
184 struct vr_chain *vr_tx_tail;
185 struct vr_chain *vr_tx_free;
186 };
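
/*
 * In the TX chain, vr_tx_free points at the next descriptor vr_start()
 * may fill, vr_tx_head at the oldest descriptor still owned by the
 * chip (reclaimed in vr_txeof()), and vr_tx_tail at the most recently
 * queued one.  vr_rx_head tracks the next RX descriptor the chip will
 * complete.
 */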
187
188 struct vr_softc {
189 struct device vr_dev;
190 void *vr_ih;
191 void *vr_ats;
192 bus_space_tag_t vr_bustag;
193 bus_space_handle_t vr_bushandle;
194 pci_chipset_tag_t vr_pc;
195 struct ethercom vr_ec;
196 u_int8_t vr_enaddr[ETHER_ADDR_LEN];
197 struct ifmedia ifmedia; /* media info */
198 bus_space_handle_t vr_bhandle; /* bus space handle */
199 bus_space_tag_t vr_btag; /* bus space tag */
200 struct vr_type *vr_info; /* Rhine adapter info */
201 struct vr_type *vr_pinfo; /* phy info */
202 u_int8_t vr_unit; /* interface number */
203 u_int8_t vr_type;
204 u_int8_t vr_phy_addr; /* PHY address */
205 u_int8_t vr_tx_pend; /* TX pending */
206 u_int8_t vr_want_auto;
207 u_int8_t vr_autoneg;
208 caddr_t vr_ldata_ptr;
209 struct vr_list_data *vr_ldata;
210 struct vr_chain_data vr_cdata;
211 };
212
213 /*
214 * register space access macros
215 */
216 #define CSR_WRITE_4(sc, reg, val) \
217 bus_space_write_4(sc->vr_btag, sc->vr_bhandle, reg, val)
218 #define CSR_WRITE_2(sc, reg, val) \
219 bus_space_write_2(sc->vr_btag, sc->vr_bhandle, reg, val)
220 #define CSR_WRITE_1(sc, reg, val) \
221 bus_space_write_1(sc->vr_btag, sc->vr_bhandle, reg, val)
222
223 #define CSR_READ_4(sc, reg) \
224 bus_space_read_4(sc->vr_btag, sc->vr_bhandle, reg)
225 #define CSR_READ_2(sc, reg) \
226 bus_space_read_2(sc->vr_btag, sc->vr_bhandle, reg)
227 #define CSR_READ_1(sc, reg) \
228 bus_space_read_1(sc->vr_btag, sc->vr_bhandle, reg)
229
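/*
 * VR_TIMEOUT bounds the reset polling loop in vr_reset(): up to
 * VR_TIMEOUT iterations of DELAY(10), roughly 10ms, before giving up
 * and complaining.
 */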
230 #define VR_TIMEOUT 1000
231
232 static int vr_newbuf __P((struct vr_softc *,
233 struct vr_chain_onefrag *));
234 static int vr_encap __P((struct vr_softc *, struct vr_chain *,
235 struct mbuf *));
236
237 static void vr_rxeof __P((struct vr_softc *));
238 static void vr_rxeoc __P((struct vr_softc *));
239 static void vr_txeof __P((struct vr_softc *));
240 static void vr_txeoc __P((struct vr_softc *));
241 static void vr_intr __P((void *));
242 static void vr_start __P((struct ifnet *));
243 static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
244 static void vr_init __P((void *));
245 static void vr_stop __P((struct vr_softc *));
246 static void vr_watchdog __P((struct ifnet *));
247 static int vr_ifmedia_upd __P((struct ifnet *));
248 static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
249
250 static void vr_mii_sync __P((struct vr_softc *));
251 static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
252 static int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *));
253 static int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *));
254 static u_int16_t vr_phy_readreg __P((struct vr_softc *, int));
255 static void vr_phy_writereg __P((struct vr_softc *, u_int16_t, u_int16_t));
256
257 static void vr_autoneg_xmit __P((struct vr_softc *));
258 static void vr_autoneg_mii __P((struct vr_softc *, int, int));
259 static void vr_setmode_mii __P((struct vr_softc *, int));
260 static void vr_getmode_mii __P((struct vr_softc *));
261 static void vr_setcfg __P((struct vr_softc *, u_int16_t));
262 static u_int8_t vr_calchash __P((u_int8_t *));
263 static void vr_setmulti __P((struct vr_softc *));
264 static void vr_reset __P((struct vr_softc *));
265 static int vr_list_rx_init __P((struct vr_softc *));
266 static int vr_list_tx_init __P((struct vr_softc *));
267
268 #define VR_SETBIT(sc, reg, x) \
269 CSR_WRITE_1(sc, reg, \
270 CSR_READ_1(sc, reg) | x)
271
272 #define VR_CLRBIT(sc, reg, x) \
273 CSR_WRITE_1(sc, reg, \
274 CSR_READ_1(sc, reg) & ~x)
275
276 #define VR_SETBIT16(sc, reg, x) \
277 CSR_WRITE_2(sc, reg, \
278 CSR_READ_2(sc, reg) | x)
279
280 #define VR_CLRBIT16(sc, reg, x) \
281 CSR_WRITE_2(sc, reg, \
282 CSR_READ_2(sc, reg) & ~x)
283
284 #define VR_SETBIT32(sc, reg, x) \
285 CSR_WRITE_4(sc, reg, \
286 CSR_READ_4(sc, reg) | x)
287
288 #define VR_CLRBIT32(sc, reg, x) \
289 CSR_WRITE_4(sc, reg, \
290 CSR_READ_4(sc, reg) & ~x)
291
292 #define SIO_SET(x) \
293 CSR_WRITE_1(sc, VR_MIICMD, \
294 CSR_READ_1(sc, VR_MIICMD) | x)
295
296 #define SIO_CLR(x) \
297 CSR_WRITE_1(sc, VR_MIICMD, \
298 CSR_READ_1(sc, VR_MIICMD) & ~x)
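
/*
 * SIO_SET()/SIO_CLR() are read-modify-write helpers on the MII command
 * register; e.g. SIO_SET(VR_MIICMD_CLK) expands to a CSR_READ_1() of
 * VR_MIICMD, ORs in the clock bit and writes the result back.  The
 * bit-bang MII routines below drive the management interface entirely
 * through these two macros.
 */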
299
300 /*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
302 */
303 static void vr_mii_sync(sc)
304 struct vr_softc *sc;
305 {
306 register int i;
307
308 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);
309
310 for (i = 0; i < 32; i++) {
311 SIO_SET(VR_MIICMD_CLK);
312 DELAY(1);
313 SIO_CLR(VR_MIICMD_CLK);
314 DELAY(1);
315 }
316
317 return;
318 }
319
320 /*
321 * Clock a series of bits through the MII.
322 */
323 static void vr_mii_send(sc, bits, cnt)
324 struct vr_softc *sc;
325 u_int32_t bits;
326 int cnt;
327 {
328 int i;
329
330 SIO_CLR(VR_MIICMD_CLK);
331
332 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
333 if (bits & i) {
334 SIO_SET(VR_MIICMD_DATAOUT);
335 } else {
336 SIO_CLR(VR_MIICMD_DATAOUT);
337 }
338 DELAY(1);
339 SIO_CLR(VR_MIICMD_CLK);
340 DELAY(1);
341 SIO_SET(VR_MIICMD_CLK);
342 }
343 }
344
345 /*
 * Read a PHY register through the MII.
347 */
348 static int vr_mii_readreg(sc, frame)
349 struct vr_softc *sc;
350 struct vr_mii_frame *frame;
351
352 {
353 int i, ack, s;
354
355 s = splimp();
356
357 /*
358 * Set up frame for RX.
359 */
360 frame->mii_stdelim = MII_COMMAND_START;
361 frame->mii_opcode = MII_COMMAND_READ;
362 frame->mii_turnaround = 0;
363 frame->mii_data = 0;
364
365 CSR_WRITE_1(sc, VR_MIICMD, 0);
366 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
367
368 /*
369 * Turn on data xmit.
370 */
371 SIO_SET(VR_MIICMD_DIR);
372
373 vr_mii_sync(sc);
374
375 /*
376 * Send command/address info.
377 */
378 vr_mii_send(sc, frame->mii_stdelim, 2);
379 vr_mii_send(sc, frame->mii_opcode, 2);
380 vr_mii_send(sc, frame->mii_phyaddr, 5);
381 vr_mii_send(sc, frame->mii_regaddr, 5);
382
383 /* Idle bit */
384 SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
385 DELAY(1);
386 SIO_SET(VR_MIICMD_CLK);
387 DELAY(1);
388
389 /* Turn off xmit. */
390 SIO_CLR(VR_MIICMD_DIR);
391
392 /* Check for ack */
393 SIO_CLR(VR_MIICMD_CLK);
394 DELAY(1);
395 SIO_SET(VR_MIICMD_CLK);
396 DELAY(1);
397 ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;
398
399 /*
400 * Now try reading data bits. If the ack failed, we still
401 * need to clock through 16 cycles to keep the PHY(s) in sync.
402 */
403 if (ack) {
404 for (i = 0; i < 16; i++) {
405 SIO_CLR(VR_MIICMD_CLK);
406 DELAY(1);
407 SIO_SET(VR_MIICMD_CLK);
408 DELAY(1);
409 }
410 goto fail;
411 }
412
413 for (i = 0x8000; i; i >>= 1) {
414 SIO_CLR(VR_MIICMD_CLK);
415 DELAY(1);
416 if (!ack) {
417 if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
418 frame->mii_data |= i;
419 DELAY(1);
420 }
421 SIO_SET(VR_MIICMD_CLK);
422 DELAY(1);
423 }
424
425 fail:
426
427 SIO_CLR(VR_MIICMD_CLK);
428 DELAY(1);
429 SIO_SET(VR_MIICMD_CLK);
430 DELAY(1);
431
432 splx(s);
433
434 if (ack)
435 return (1);
436 return (0);
437 }
438
439 /*
440 * Write to a PHY register through the MII.
441 */
442 static int vr_mii_writereg(sc, frame)
443 struct vr_softc *sc;
444 struct vr_mii_frame *frame;
445 {
446 int s;
447
448 s = splimp();
449
450 CSR_WRITE_1(sc, VR_MIICMD, 0);
451 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
452
453 /*
454 * Set up frame for TX.
455 */
456
457 frame->mii_stdelim = MII_COMMAND_START;
458 frame->mii_opcode = MII_COMMAND_WRITE;
459 frame->mii_turnaround = MII_COMMAND_ACK;
460
461 /*
462 * Turn on data output.
463 */
464 SIO_SET(VR_MIICMD_DIR);
465
466 vr_mii_sync(sc);
467
468 vr_mii_send(sc, frame->mii_stdelim, 2);
469 vr_mii_send(sc, frame->mii_opcode, 2);
470 vr_mii_send(sc, frame->mii_phyaddr, 5);
471 vr_mii_send(sc, frame->mii_regaddr, 5);
472 vr_mii_send(sc, frame->mii_turnaround, 2);
473 vr_mii_send(sc, frame->mii_data, 16);
474
475 /* Idle bit. */
476 SIO_SET(VR_MIICMD_CLK);
477 DELAY(1);
478 SIO_CLR(VR_MIICMD_CLK);
479 DELAY(1);
480
481 /*
482 * Turn off xmit.
483 */
484 SIO_CLR(VR_MIICMD_DIR);
485
486 splx(s);
487
488 return (0);
489 }
490
491 static u_int16_t vr_phy_readreg(sc, reg)
492 struct vr_softc *sc;
493 int reg;
494 {
495 struct vr_mii_frame frame;
496
497 bzero((char *)&frame, sizeof (frame));
498
499 frame.mii_phyaddr = sc->vr_phy_addr;
500 frame.mii_regaddr = reg;
501 vr_mii_readreg(sc, &frame);
502
503 return (frame.mii_data);
504 }
505
506 static void vr_phy_writereg(sc, reg, data)
507 struct vr_softc *sc;
508 u_int16_t reg;
509 u_int16_t data;
510 {
511 struct vr_mii_frame frame;
512
513 bzero((char *)&frame, sizeof (frame));
514
515 frame.mii_phyaddr = sc->vr_phy_addr;
516 frame.mii_regaddr = reg;
517 frame.mii_data = data;
518
519 vr_mii_writereg(sc, &frame);
520
521 return;
522 }
523
/*
 * Calculate the CRC of a multicast group address and return the upper
 * 6 bits, which the chip uses as an index into its 64-bit hash table.
 */
527 static u_int8_t vr_calchash(addr)
528 u_int8_t *addr;
529 {
530 u_int32_t crc, carry;
531 int i, j;
532 u_int8_t c;
533
534 /* Compute CRC for the address value. */
535 crc = 0xFFFFFFFF; /* initial value */
536
537 for (i = 0; i < 6; i++) {
538 c = *(addr + i);
539 for (j = 0; j < 8; j++) {
540 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
541 crc <<= 1;
542 c >>= 1;
543 if (carry)
544 crc = (crc ^ 0x04c11db6) | carry;
545 }
546 }
547
548 /* return the filter bit position */
549 return ((crc >> 26) & 0x0000003F);
550 }
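
/*
 * The 6-bit value returned above selects one bit in the 64-bit filter:
 * values 0-31 map to a bit in VR_MAR0 and 32-63 to a bit in VR_MAR1,
 * as programmed by vr_setmulti() below.
 */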
551
552 /*
553 * Program the 64-bit multicast hash filter.
554 */
555 static void vr_setmulti(sc)
556 struct vr_softc *sc;
557 {
558 struct ifnet *ifp;
559 int h = 0;
560 u_int32_t hashes[2] = { 0, 0 };
561 struct ether_multistep step;
562 struct ether_multi *enm;
563 int mcnt = 0;
564 u_int8_t rxfilt;
565
566 ifp = &sc->vr_ec.ec_if;
567
568 rxfilt = CSR_READ_1(sc, VR_RXCFG);
569
570 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
571 rxfilt |= VR_RXCFG_RX_MULTI;
572 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
573 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
574 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
575 return;
576 }
577
578 /* first, zot all the existing hash bits */
579 CSR_WRITE_4(sc, VR_MAR0, 0);
580 CSR_WRITE_4(sc, VR_MAR1, 0);
581
582 /* now program new ones */
583 ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
584 while (enm != NULL) {
		/*
		 * A range of addresses can't be matched by the hash
		 * filter; skip it, but be sure to advance to the next
		 * entry so we don't loop forever.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			ETHER_NEXT_MULTI(step, enm);
			continue;
		}
587
588 h = vr_calchash(enm->enm_addrlo);
589
590 if (h < 32)
591 hashes[0] |= (1 << h);
592 else
593 hashes[1] |= (1 << (h - 32));
594 ETHER_NEXT_MULTI(step, enm);
595 mcnt++;
596 }
597
598 if (mcnt)
599 rxfilt |= VR_RXCFG_RX_MULTI;
600 else
601 rxfilt &= ~VR_RXCFG_RX_MULTI;
602
603 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
604 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
605 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
606
607 return;
608 }
609
610 /*
611 * Initiate an autonegotiation session.
612 */
613 static void vr_autoneg_xmit(sc)
614 struct vr_softc *sc;
615 {
616 u_int16_t phy_sts;
617
618 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
619 DELAY(500);
620 while (vr_phy_readreg(sc, PHY_BMCR)
621 & PHY_BMCR_RESET);
622
623 phy_sts = vr_phy_readreg(sc, PHY_BMCR);
624 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
625 vr_phy_writereg(sc, PHY_BMCR, phy_sts);
626
627 return;
628 }
629
630 /*
631 * Invoke autonegotiation on a PHY.
632 */
633 static void vr_autoneg_mii(sc, flag, verbose)
634 struct vr_softc *sc;
635 int flag;
636 int verbose;
637 {
638 u_int16_t phy_sts = 0, media, advert, ability;
639 struct ifnet *ifp;
640 struct ifmedia *ifm;
641
642 ifm = &sc->ifmedia;
643 ifp = &sc->vr_ec.ec_if;
644
645 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
646
647 /*
648 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
649 * bit cleared in the status register, but has the 'autoneg enabled'
650 * bit set in the control register. This is a contradiction, and
651 * I'm not sure how to handle it. If you want to force an attempt
652 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
653 * and see what happens.
654 */
655 #ifndef FORCE_AUTONEG_TFOUR
656 /*
657 * First, see if autoneg is supported. If not, there's
658 * no point in continuing.
659 */
660 phy_sts = vr_phy_readreg(sc, PHY_BMSR);
661 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
662 if (verbose)
663 printf("%s: autonegotiation not supported\n",
664 sc->vr_dev.dv_xname);
665 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
666 return;
667 }
668 #endif
669
670 switch (flag) {
671 case VR_FLAG_FORCEDELAY:
672 /*
673 * XXX Never use this option anywhere but in the probe
674 * routine: making the kernel stop dead in its tracks
675 * for three whole seconds after we've gone multi-user
676 * is really bad manners.
677 */
678 vr_autoneg_xmit(sc);
679 DELAY(5000000);
680 break;
681 case VR_FLAG_SCHEDDELAY:
682 /*
683 * Wait for the transmitter to go idle before starting
684 * an autoneg session, otherwise vr_start() may clobber
685 * our timeout, and we don't want to allow transmission
686 * during an autoneg session since that can screw it up.
687 */
688 if (sc->vr_cdata.vr_tx_head != NULL) {
689 sc->vr_want_auto = 1;
690 return;
691 }
692 vr_autoneg_xmit(sc);
693 ifp->if_timer = 5;
694 sc->vr_autoneg = 1;
695 sc->vr_want_auto = 0;
696 return;
697 break;
698 case VR_FLAG_DELAYTIMEO:
699 ifp->if_timer = 0;
700 sc->vr_autoneg = 0;
701 break;
702 default:
703 printf("%s: invalid autoneg flag: %d\n",
704 sc->vr_dev.dv_xname, flag);
705 return;
706 }
707
708 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
709 if (verbose)
710 printf("%s: autoneg complete, ",
711 sc->vr_dev.dv_xname);
712 phy_sts = vr_phy_readreg(sc, PHY_BMSR);
713 } else {
714 if (verbose)
715 printf("%s: autoneg not complete, ",
716 sc->vr_dev.dv_xname);
717 }
718
719 media = vr_phy_readreg(sc, PHY_BMCR);
720
721 /* Link is good. Report modes and set duplex mode. */
722 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
723 if (verbose)
724 printf("link status good ");
725 advert = vr_phy_readreg(sc, PHY_ANAR);
726 ability = vr_phy_readreg(sc, PHY_LPAR);
727
728 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
729 ifm->ifm_media = IFM_ETHER|IFM_100_T4;
730 media |= PHY_BMCR_SPEEDSEL;
731 media &= ~PHY_BMCR_DUPLEX;
732 printf("(100baseT4)\n");
733 } else if (advert & PHY_ANAR_100BTXFULL &&
734 ability & PHY_ANAR_100BTXFULL) {
735 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
736 media |= PHY_BMCR_SPEEDSEL;
737 media |= PHY_BMCR_DUPLEX;
738 printf("(full-duplex, 100Mbps)\n");
739 } else if (advert & PHY_ANAR_100BTXHALF &&
740 ability & PHY_ANAR_100BTXHALF) {
741 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
742 media |= PHY_BMCR_SPEEDSEL;
743 media &= ~PHY_BMCR_DUPLEX;
744 printf("(half-duplex, 100Mbps)\n");
745 } else if (advert & PHY_ANAR_10BTFULL &&
746 ability & PHY_ANAR_10BTFULL) {
747 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
748 media &= ~PHY_BMCR_SPEEDSEL;
749 media |= PHY_BMCR_DUPLEX;
750 printf("(full-duplex, 10Mbps)\n");
751 } else {
752 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
753 media &= ~PHY_BMCR_SPEEDSEL;
754 media &= ~PHY_BMCR_DUPLEX;
755 printf("(half-duplex, 10Mbps)\n");
756 }
757
758 media &= ~PHY_BMCR_AUTONEGENBL;
759
760 /* Set ASIC's duplex mode to match the PHY. */
761 vr_setcfg(sc, media);
762 vr_phy_writereg(sc, PHY_BMCR, media);
763 } else {
764 if (verbose)
765 printf("no carrier\n");
766 }
767
768 vr_init(sc);
769
770 if (sc->vr_tx_pend) {
771 sc->vr_autoneg = 0;
772 sc->vr_tx_pend = 0;
773 vr_start(ifp);
774 }
775
776 return;
777 }
778
779 static void vr_getmode_mii(sc)
780 struct vr_softc *sc;
781 {
782 u_int16_t bmsr;
783 struct ifnet *ifp;
784
785 ifp = &sc->vr_ec.ec_if;
786
787 bmsr = vr_phy_readreg(sc, PHY_BMSR);
788
789 /* fallback */
790 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
791
792 if (bmsr & PHY_BMSR_10BTHALF) {
793 ifmedia_add(&sc->ifmedia,
794 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
795 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
796 }
797
798 if (bmsr & PHY_BMSR_10BTFULL) {
799 ifmedia_add(&sc->ifmedia,
800 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
801 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
802 }
803
804 if (bmsr & PHY_BMSR_100BTXHALF) {
805 ifp->if_baudrate = 100000000;
806 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
807 ifmedia_add(&sc->ifmedia,
808 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
809 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
810 }
811
812 if (bmsr & PHY_BMSR_100BTXFULL) {
813 ifp->if_baudrate = 100000000;
814 ifmedia_add(&sc->ifmedia,
815 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
816 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
817 }
818
819 /* Some also support 100BaseT4. */
820 if (bmsr & PHY_BMSR_100BT4) {
821 ifp->if_baudrate = 100000000;
822 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
823 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
824 #ifdef FORCE_AUTONEG_TFOUR
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
826 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
827 #endif
828 }
829
830 if (bmsr & PHY_BMSR_CANAUTONEG) {
831 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
832 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
833 }
834
835 return;
836 }
837
838 /*
839 * Set speed and duplex mode.
840 */
841 static void vr_setmode_mii(sc, media)
842 struct vr_softc *sc;
843 int media;
844 {
845 u_int16_t bmcr;
846 struct ifnet *ifp;
847
848 ifp = &sc->vr_ec.ec_if;
849
850 /*
851 * If an autoneg session is in progress, stop it.
852 */
853 if (sc->vr_autoneg) {
854 printf("%s: canceling autoneg session\n",
855 sc->vr_dev.dv_xname);
856 ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
857 bmcr = vr_phy_readreg(sc, PHY_BMCR);
858 bmcr &= ~PHY_BMCR_AUTONEGENBL;
859 vr_phy_writereg(sc, PHY_BMCR, bmcr);
860 }
861
862 printf("%s: selecting MII, ", sc->vr_dev.dv_xname);
863
864 bmcr = vr_phy_readreg(sc, PHY_BMCR);
865
866 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
867 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
868
869 if (IFM_SUBTYPE(media) == IFM_100_T4) {
870 printf("100Mbps/T4, half-duplex\n");
871 bmcr |= PHY_BMCR_SPEEDSEL;
872 bmcr &= ~PHY_BMCR_DUPLEX;
873 }
874
875 if (IFM_SUBTYPE(media) == IFM_100_TX) {
876 printf("100Mbps, ");
877 bmcr |= PHY_BMCR_SPEEDSEL;
878 }
879
880 if (IFM_SUBTYPE(media) == IFM_10_T) {
881 printf("10Mbps, ");
882 bmcr &= ~PHY_BMCR_SPEEDSEL;
883 }
884
885 if ((media & IFM_GMASK) == IFM_FDX) {
886 printf("full duplex\n");
887 bmcr |= PHY_BMCR_DUPLEX;
888 } else {
889 printf("half duplex\n");
890 bmcr &= ~PHY_BMCR_DUPLEX;
891 }
892
893 vr_setcfg(sc, bmcr);
894 vr_phy_writereg(sc, PHY_BMCR, bmcr);
895
896 return;
897 }
898
899 /*
900 * In order to fiddle with the
901 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
902 * first have to put the transmit and/or receive logic in the idle state.
903 */
904 static void vr_setcfg(sc, bmcr)
905 struct vr_softc *sc;
906 u_int16_t bmcr;
907 {
908 int restart = 0;
909
910 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
911 restart = 1;
912 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
913 }
914
915 if (bmcr & PHY_BMCR_DUPLEX)
916 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
917 else
918 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
919
920 if (restart)
921 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
922
923 return;
924 }
925
926 static void vr_reset(sc)
927 struct vr_softc *sc;
928 {
929 register int i;
930
931 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
932
933 for (i = 0; i < VR_TIMEOUT; i++) {
934 DELAY(10);
935 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
936 break;
937 }
938 if (i == VR_TIMEOUT)
939 printf("%s: reset never completed!\n",
940 sc->vr_dev.dv_xname);
941
942 /* Wait a little while for the chip to get its brains in order. */
943 DELAY(1000);
944
945 return;
946 }
947
948 /*
949 * Initialize the transmit descriptors.
950 */
951 static int vr_list_tx_init(sc)
952 struct vr_softc *sc;
953 {
954 struct vr_chain_data *cd;
955 struct vr_list_data *ld;
956 int i;
957
958 cd = &sc->vr_cdata;
959 ld = sc->vr_ldata;
960 for (i = 0; i < VR_TX_LIST_CNT; i++) {
961 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
962 if (i == (VR_TX_LIST_CNT - 1))
963 cd->vr_tx_chain[i].vr_nextdesc =
964 &cd->vr_tx_chain[0];
965 else
966 cd->vr_tx_chain[i].vr_nextdesc =
967 &cd->vr_tx_chain[i + 1];
968 }
969
970 cd->vr_tx_free = &cd->vr_tx_chain[0];
971 cd->vr_tx_tail = cd->vr_tx_head = NULL;
972
973 return (0);
974 }
975
976
977 /*
978 * Initialize the RX descriptors and allocate mbufs for them. Note that
979 * we arrange the descriptors in a closed ring, so that the last descriptor
980 * points back to the first.
981 */
982 static int vr_list_rx_init(sc)
983 struct vr_softc *sc;
984 {
985 struct vr_chain_data *cd;
986 struct vr_list_data *ld;
987 int i;
988
989 cd = &sc->vr_cdata;
990 ld = sc->vr_ldata;
991
992 for (i = 0; i < VR_RX_LIST_CNT; i++) {
993 cd->vr_rx_chain[i].vr_ptr =
994 (struct vr_desc *)&ld->vr_rx_list[i];
995 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
996 return (ENOBUFS);
997 if (i == (VR_RX_LIST_CNT - 1)) {
998 cd->vr_rx_chain[i].vr_nextdesc =
999 &cd->vr_rx_chain[0];
1000 ld->vr_rx_list[i].vr_next =
1001 vtophys(&ld->vr_rx_list[0]);
1002 } else {
1003 cd->vr_rx_chain[i].vr_nextdesc =
1004 &cd->vr_rx_chain[i + 1];
1005 ld->vr_rx_list[i].vr_next =
1006 vtophys(&ld->vr_rx_list[i + 1]);
1007 }
1008 }
1009
1010 cd->vr_rx_head = &cd->vr_rx_chain[0];
1011
1012 return (0);
1013 }
1014
1015 /*
1016 * Initialize an RX descriptor and attach an MBUF cluster.
1017 * Note: the length fields are only 11 bits wide, which means the
1018 * largest size we can specify is 2047. This is important because
1019 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1020 * overflow the field and make a mess.
1021 */
1022 static int vr_newbuf(sc, c)
1023 struct vr_softc *sc;
1024 struct vr_chain_onefrag *c;
1025 {
1026 struct mbuf *m_new = NULL;
1027
1028 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1029 if (m_new == NULL) {
1030 printf("%s: no memory for rx list -- packet dropped!\n",
1031 sc->vr_dev.dv_xname);
1032 return (ENOBUFS);
1033 }
1034
1035 MCLGET(m_new, M_DONTWAIT);
1036 if (!(m_new->m_flags & M_EXT)) {
1037 printf("%s: no memory for rx list -- packet dropped!\n",
1038 sc->vr_dev.dv_xname);
1039 m_freem(m_new);
1040 return (ENOBUFS);
1041 }
1042
1043 c->vr_mbuf = m_new;
1044 c->vr_ptr->vr_status = VR_RXSTAT;
1045 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1046 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1047
1048 return (0);
1049 }
1050
1051 /*
1052 * A frame has been uploaded: pass the resulting mbuf chain up to
1053 * the higher level protocols.
1054 */
1055 static void vr_rxeof(sc)
1056 struct vr_softc *sc;
1057 {
1058 struct ether_header *eh;
1059 struct mbuf *m;
1060 struct ifnet *ifp;
1061 struct vr_chain_onefrag *cur_rx;
1062 int total_len = 0;
1063 u_int32_t rxstat;
1064
1065 ifp = &sc->vr_ec.ec_if;
1066
1067 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
1068 VR_RXSTAT_OWN)) {
1069 cur_rx = sc->vr_cdata.vr_rx_head;
1070 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
1071
1072 /*
1073 * If an error occurs, update stats, clear the
1074 * status word and leave the mbuf cluster in place:
1075 * it should simply get re-used next time this descriptor
1076 * comes up in the ring.
1077 */
1078 if (rxstat & VR_RXSTAT_RXERR) {
1079 ifp->if_ierrors++;
1080 printf("%s: rx error: ", sc->vr_dev.dv_xname);
1081 switch (rxstat & 0x000000FF) {
1082 case VR_RXSTAT_CRCERR:
1083 printf("crc error\n");
1084 break;
1085 case VR_RXSTAT_FRAMEALIGNERR:
1086 printf("frame alignment error\n");
1087 break;
1088 case VR_RXSTAT_FIFOOFLOW:
1089 printf("FIFO overflow\n");
1090 break;
1091 case VR_RXSTAT_GIANT:
1092 printf("received giant packet\n");
1093 break;
1094 case VR_RXSTAT_RUNT:
1095 printf("received runt packet\n");
1096 break;
1097 case VR_RXSTAT_BUSERR:
1098 printf("system bus error\n");
1099 break;
1100 case VR_RXSTAT_BUFFERR:
1101 printf("rx buffer error\n");
1102 break;
1103 default:
1104 printf("unknown rx error\n");
1105 break;
1106 }
1107 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1108 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
1109 continue;
1110 }
1111
1112 /* No errors; receive the packet. */
1113 m = cur_rx->vr_mbuf;
1114 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1115
1116 /*
1117 * XXX The VIA Rhine chip includes the CRC with every
1118 * received frame, and there's no way to turn this
1119 * behavior off (at least, I can't find anything in
1120 * the manual that explains how to do it) so we have
1121 * to trim off the CRC manually.
1122 */
1123 total_len -= ETHER_CRC_LEN;
1124
1125 /*
1126 * Try to conjure up a new mbuf cluster. If that
1127 * fails, it means we have an out of memory condition and
1128 * should leave the buffer in place and continue. This will
1129 * result in a lost packet, but there's little else we
1130 * can do in this situation.
1131 */
1132 if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
1133 ifp->if_ierrors++;
1134 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1135 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
1136 continue;
1137 }
1138
1139 ifp->if_ipackets++;
1140 eh = mtod(m, struct ether_header *);
1141 m->m_pkthdr.rcvif = ifp;
1142 m->m_pkthdr.len = m->m_len = total_len;
1143 #if NBPFILTER > 0
1144 /*
1145 * Handle BPF listeners. Let the BPF user see the packet, but
1146 * don't pass it up to the ether_input() layer unless it's
1147 * a broadcast packet, multicast packet, matches our ethernet
1148 * address or the interface is in promiscuous mode.
1149 */
1150 if (ifp->if_bpf) {
1151 bpf_mtap(ifp->if_bpf, m);
1152 if (ifp->if_flags & IFF_PROMISC &&
1153 (memcmp(eh->ether_dhost, sc->vr_enaddr,
1154 ETHER_ADDR_LEN) &&
1155 (eh->ether_dhost[0] & 1) == 0)) {
1156 m_freem(m);
1157 continue;
1158 }
1159 }
1160 #endif
1161 /* Remove header from mbuf and pass it on. */
1162 m_adj(m, sizeof (struct ether_header));
1163 ether_input(ifp, eh, m);
1164 }
1165
1166 return;
1167 }
1168
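/*
 * RX 'end of channel' handler: called from vr_intr() when the receiver
 * reports an error, runs out of buffers, overflows or drops a frame.
 * Reap any completed descriptors, then point the chip back at the
 * current head of the RX ring and restart reception.
 */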
1169 void vr_rxeoc(sc)
1170 struct vr_softc *sc;
1171 {
1172
1173 vr_rxeof(sc);
1174 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1175 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1176 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1177 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1178
1179 return;
1180 }
1181
1182 /*
1183 * A frame was downloaded to the chip. It's safe for us to clean up
1184 * the list buffers.
1185 */
1186
1187 static void vr_txeof(sc)
1188 struct vr_softc *sc;
1189 {
1190 struct vr_chain *cur_tx;
1191 struct ifnet *ifp;
1192 register struct mbuf *n;
1193
1194 ifp = &sc->vr_ec.ec_if;
1195
1196 /* Clear the timeout timer. */
1197 ifp->if_timer = 0;
1198
1199 /* Sanity check. */
1200 if (sc->vr_cdata.vr_tx_head == NULL)
1201 return;
1202
1203 /*
1204 * Go through our tx list and free mbufs for those
1205 * frames that have been transmitted.
1206 */
1207 while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1208 u_int32_t txstat;
1209
1210 cur_tx = sc->vr_cdata.vr_tx_head;
1211 txstat = cur_tx->vr_ptr->vr_status;
1212
1213 if (txstat & VR_TXSTAT_OWN)
1214 break;
1215
1216 if (txstat & VR_TXSTAT_ERRSUM) {
1217 ifp->if_oerrors++;
1218 if (txstat & VR_TXSTAT_DEFER)
1219 ifp->if_collisions++;
1220 if (txstat & VR_TXSTAT_LATECOLL)
1221 ifp->if_collisions++;
1222 }
1223
1224 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;
1225
1226 ifp->if_opackets++;
1227 MFREE(cur_tx->vr_mbuf, n);
1228 cur_tx->vr_mbuf = NULL;
1229
1230 if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1231 sc->vr_cdata.vr_tx_head = NULL;
1232 sc->vr_cdata.vr_tx_tail = NULL;
1233 break;
1234 }
1235
1236 sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1237 }
1238
1239 return;
1240 }
1241
1242 /*
1243 * TX 'end of channel' interrupt handler.
1244 */
1245 static void vr_txeoc(sc)
1246 struct vr_softc *sc;
1247 {
1248 struct ifnet *ifp;
1249
1250 ifp = &sc->vr_ec.ec_if;
1251
1252 ifp->if_timer = 0;
1253
1254 if (sc->vr_cdata.vr_tx_head == NULL) {
1255 ifp->if_flags &= ~IFF_OACTIVE;
1256 sc->vr_cdata.vr_tx_tail = NULL;
1257 if (sc->vr_want_auto)
1258 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1259 }
1260
1261 return;
1262 }
1263
1264 static void vr_intr(arg)
1265 void *arg;
1266 {
1267 struct vr_softc *sc;
1268 struct ifnet *ifp;
1269 u_int16_t status;
1270
1271 sc = arg;
1272 ifp = &sc->vr_ec.ec_if;
1273
	/* Suppress unwanted interrupts. */
1275 if (!(ifp->if_flags & IFF_UP)) {
1276 vr_stop(sc);
1277 return;
1278 }
1279
1280 /* Disable interrupts. */
1281 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1282
1283 for (;;) {
1284
1285 status = CSR_READ_2(sc, VR_ISR);
1286 if (status)
1287 CSR_WRITE_2(sc, VR_ISR, status);
1288
1289 if ((status & VR_INTRS) == 0)
1290 break;
1291
1292 if (status & VR_ISR_RX_OK)
1293 vr_rxeof(sc);
1294
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW) || (status & VR_ISR_RX_DROPPED)) {
1298 vr_rxeof(sc);
1299 vr_rxeoc(sc);
1300 }
1301
1302 if (status & VR_ISR_TX_OK) {
1303 vr_txeof(sc);
1304 vr_txeoc(sc);
1305 }
1306
1307 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)) {
1308 ifp->if_oerrors++;
1309 vr_txeof(sc);
1310 if (sc->vr_cdata.vr_tx_head != NULL) {
1311 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1312 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1313 }
1314 }
1315
1316 if (status & VR_ISR_BUSERR) {
1317 vr_reset(sc);
1318 vr_init(sc);
1319 }
1320 }
1321
1322 /* Re-enable interrupts. */
1323 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1324
1325 if (ifp->if_snd.ifq_head != NULL) {
1326 vr_start(ifp);
1327 }
1328
1329 return;
1330 }
1331
1332 /*
1333 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1334 * pointers to the fragment pointers.
1335 */
1336 static int vr_encap(sc, c, m_head)
1337 struct vr_softc *sc;
1338 struct vr_chain *c;
1339 struct mbuf *m_head;
1340 {
1341 int frag = 0;
1342 struct vr_desc *f = NULL;
1343 int total_len;
1344 struct mbuf *m;
1345
1346 m = m_head;
1347 total_len = 0;
1348
1349 /*
1350 * The VIA Rhine wants packet buffers to be longword
1351 * aligned, but very often our mbufs aren't. Rather than
1352 * waste time trying to decide when to copy and when not
1353 * to copy, just do it all the time.
1354 */
1355 if (m != NULL) {
1356 struct mbuf *m_new = NULL;
1357
1358 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1359 if (m_new == NULL) {
1360 printf("%s: no memory for tx list",
1361 sc->vr_dev.dv_xname);
1362 return (1);
1363 }
1364 if (m_head->m_pkthdr.len > MHLEN) {
1365 MCLGET(m_new, M_DONTWAIT);
1366 if (!(m_new->m_flags & M_EXT)) {
1367 m_freem(m_new);
1368 printf("%s: no memory for tx list",
1369 sc->vr_dev.dv_xname);
1370 return (1);
1371 }
1372 }
1373 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1374 mtod(m_new, caddr_t));
1375 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1376 m_freem(m_head);
1377 m_head = m_new;
1378 /*
1379 * The Rhine chip doesn't auto-pad, so we have to make
1380 * sure to pad short frames out to the minimum frame length
1381 * ourselves.
1382 */
1383 if (m_head->m_len < VR_MIN_FRAMELEN) {
1384 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1385 m_new->m_len = m_new->m_pkthdr.len;
1386 }
1387 f = c->vr_ptr;
1388 f->vr_data = vtophys(mtod(m_new, caddr_t));
1389 f->vr_ctl = total_len = m_new->m_len;
1390 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1391 f->vr_status = 0;
1392 frag = 1;
1393 }
1394
1395 c->vr_mbuf = m_head;
1396 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1397 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1398
1399 return (0);
1400 }
1401
/*
 * Main transmit routine.  Since the Rhine requires longword-aligned
 * transmit buffers, vr_encap() copies each outgoing frame into a fresh
 * mbuf (or cluster) and points the descriptor at the copy; a pointer
 * to the copy is also saved in the software chain so the mbuf can be
 * freed once transmission completes.
 */
1408
1409 static void vr_start(ifp)
1410 struct ifnet *ifp;
1411 {
1412 struct vr_softc *sc;
1413 struct mbuf *m_head = NULL;
1414 struct vr_chain *cur_tx = NULL, *start_tx;
1415
1416 sc = ifp->if_softc;
1417
1418 if (sc->vr_autoneg) {
1419 sc->vr_tx_pend = 1;
1420 return;
1421 }
1422
1423 /*
1424 * Check for an available queue slot. If there are none,
1425 * punt.
1426 */
1427 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1428 ifp->if_flags |= IFF_OACTIVE;
1429 return;
1430 }
1431
1432 start_tx = sc->vr_cdata.vr_tx_free;
1433
1434 while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1435 IF_DEQUEUE(&ifp->if_snd, m_head);
1436 if (m_head == NULL)
1437 break;
1438
1439 /* Pick a descriptor off the free list. */
1440 cur_tx = sc->vr_cdata.vr_tx_free;
1441 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1442
1443 /* Pack the data into the descriptor. */
1444 vr_encap(sc, cur_tx, m_head);
1445
1446 if (cur_tx != start_tx)
1447 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1448
1449 #if NBPFILTER > 0
1450 /*
1451 * If there's a BPF listener, bounce a copy of this frame
1452 * to him.
1453 */
1454 if (ifp->if_bpf)
1455 bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
1456 #endif
1457 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1458 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1459 }
1460
1461 /*
1462 * If there are no frames queued, bail.
1463 */
1464 if (cur_tx == NULL)
1465 return;
1466
1467 sc->vr_cdata.vr_tx_tail = cur_tx;
1468
1469 if (sc->vr_cdata.vr_tx_head == NULL)
1470 sc->vr_cdata.vr_tx_head = start_tx;
1471
1472 /*
1473 * Set a timeout in case the chip goes out to lunch.
1474 */
1475 ifp->if_timer = 5;
1476
1477 return;
1478 }
1479
1480 static void vr_init(xsc)
1481 void *xsc;
1482 {
1483 struct vr_softc *sc = xsc;
1484 struct ifnet *ifp = &sc->vr_ec.ec_if;
1485 u_int16_t phy_bmcr = 0;
1486 int s;
1487
1488 if (sc->vr_autoneg)
1489 return;
1490
1491 s = splimp();
1492
1493 if (sc->vr_pinfo != NULL)
1494 phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);
1495
1496 /*
1497 * Cancel pending I/O and free all RX/TX buffers.
1498 */
1499 vr_stop(sc);
1500 vr_reset(sc);
1501
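	/*
	 * Set both DMA thresholds to store-and-forward: the transmitter
	 * waits for a complete frame in its FIFO before sending (which
	 * avoids underruns) and the receiver buffers a whole frame
	 * before starting DMA.  This is the conservative setting, at a
	 * small cost in latency.
	 */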
1502 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1503 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1504
1505 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1506 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1507
1508 /* Init circular RX list. */
1509 if (vr_list_rx_init(sc) == ENOBUFS) {
1510 printf("%s: initialization failed: no "
1511 "memory for rx buffers\n", sc->vr_dev.dv_xname);
1512 vr_stop(sc);
1513 (void)splx(s);
1514 return;
1515 }
1516
1517 /*
1518 * Init tx descriptors.
1519 */
1520 vr_list_tx_init(sc);
1521
1522 /* If we want promiscuous mode, set the allframes bit. */
1523 if (ifp->if_flags & IFF_PROMISC)
1524 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1525 else
1526 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1527
1528 /* Set capture broadcast bit to capture broadcast frames. */
1529 if (ifp->if_flags & IFF_BROADCAST)
1530 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1531 else
1532 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1533
1534 /*
1535 * Program the multicast filter, if necessary.
1536 */
1537 vr_setmulti(sc);
1538
1539 /*
1540 * Load the address of the RX list.
1541 */
1542 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1543
1544 /* Enable receiver and transmitter. */
1545 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1546 VR_CMD_TX_ON|VR_CMD_RX_ON|
1547 VR_CMD_RX_GO);
1548
1549 vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));
1550
1551 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1552
1553 /*
1554 * Enable interrupts.
1555 */
1556 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1557 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1558
1559 /* Restore state of BMCR */
1560 if (sc->vr_pinfo != NULL)
1561 vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1562
1563 ifp->if_flags |= IFF_RUNNING;
1564 ifp->if_flags &= ~IFF_OACTIVE;
1565
1566 (void)splx(s);
1567
1568 return;
1569 }
1570
1571 /*
1572 * Set media options.
1573 */
1574 static int vr_ifmedia_upd(ifp)
1575 struct ifnet *ifp;
1576 {
1577 struct vr_softc *sc;
1578 struct ifmedia *ifm;
1579
1580 sc = ifp->if_softc;
1581 ifm = &sc->ifmedia;
1582
1583 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1584 return (EINVAL);
1585
1586 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1587 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1588 else
1589 vr_setmode_mii(sc, ifm->ifm_media);
1590
1591 return (0);
1592 }
1593
1594 /*
1595 * Report current media status.
1596 */
1597 static void vr_ifmedia_sts(ifp, ifmr)
1598 struct ifnet *ifp;
1599 struct ifmediareq *ifmr;
1600 {
1601 struct vr_softc *sc;
1602 u_int16_t advert = 0, ability = 0;
1603
1604 sc = ifp->if_softc;
1605
1606 ifmr->ifm_active = IFM_ETHER;
1607
1608 if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1609 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1610 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1611 else
1612 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1613 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1614 ifmr->ifm_active |= IFM_FDX;
1615 else
1616 ifmr->ifm_active |= IFM_HDX;
1617 return;
1618 }
1619
1620 ability = vr_phy_readreg(sc, PHY_LPAR);
1621 advert = vr_phy_readreg(sc, PHY_ANAR);
1622 if (advert & PHY_ANAR_100BT4 &&
1623 ability & PHY_ANAR_100BT4) {
1624 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1625 } else if (advert & PHY_ANAR_100BTXFULL &&
1626 ability & PHY_ANAR_100BTXFULL) {
1627 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1628 } else if (advert & PHY_ANAR_100BTXHALF &&
1629 ability & PHY_ANAR_100BTXHALF) {
1630 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1631 } else if (advert & PHY_ANAR_10BTFULL &&
1632 ability & PHY_ANAR_10BTFULL) {
1633 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1634 } else if (advert & PHY_ANAR_10BTHALF &&
1635 ability & PHY_ANAR_10BTHALF) {
1636 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1637 }
1638
1639 return;
1640 }
1641
1642 static int vr_ioctl(ifp, command, data)
1643 struct ifnet *ifp;
1644 u_long command;
1645 caddr_t data;
1646 {
1647 struct vr_softc *sc = ifp->if_softc;
1648 struct ifreq *ifr = (struct ifreq *)data;
1649 struct ifaddr *ifa = (struct ifaddr *)data;
1650 int s, error = 0;
1651
1652 s = splimp();
1653
1654 switch (command) {
1655 case SIOCSIFADDR:
1656 ifp->if_flags |= IFF_UP;
1657
1658 switch (ifa->ifa_addr->sa_family) {
1659 #ifdef INET
1660 case AF_INET:
1661 vr_init(sc);
1662 arp_ifinit(ifp, ifa);
1663 break;
1664 #endif /* INET */
1665 default:
1666 vr_init(sc);
1667 break;
1668 }
1669 break;
1670
1671 case SIOCGIFADDR:
1672 bcopy((caddr_t) sc->vr_enaddr,
1673 (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
1674 ETHER_ADDR_LEN);
1675 break;
1676
1677 case SIOCSIFMTU:
1678 if (ifr->ifr_mtu > ETHERMTU)
1679 error = EINVAL;
1680 else
1681 ifp->if_mtu = ifr->ifr_mtu;
1682 break;
1683
1684 case SIOCSIFFLAGS:
1685 if (ifp->if_flags & IFF_UP) {
1686 vr_init(sc);
1687 } else {
1688 if (ifp->if_flags & IFF_RUNNING)
1689 vr_stop(sc);
1690 }
1691 error = 0;
1692 break;
1693 case SIOCADDMULTI:
1694 case SIOCDELMULTI:
1695 if (command == SIOCADDMULTI)
1696 error = ether_addmulti(ifr, &sc->vr_ec);
1697 else
1698 error = ether_delmulti(ifr, &sc->vr_ec);
1699
1700 if (error == ENETRESET) {
1701 vr_setmulti(sc);
1702 error = 0;
1703 }
1704 break;
1705 case SIOCGIFMEDIA:
1706 case SIOCSIFMEDIA:
1707 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1708 break;
1709 default:
1710 error = EINVAL;
1711 break;
1712 }
1713
1714 (void)splx(s);
1715
1716 return (error);
1717 }
1718
1719 static void vr_watchdog(ifp)
1720 struct ifnet *ifp;
1721 {
1722 struct vr_softc *sc;
1723
1724 sc = ifp->if_softc;
1725
1726 if (sc->vr_autoneg) {
1727 vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1728 return;
1729 }
1730
1731 ifp->if_oerrors++;
1732 printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
1733
1734 if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1735 printf("%s: no carrier - transceiver cable problem?\n",
1736 sc->vr_dev.dv_xname);
1737
1738 vr_stop(sc);
1739 vr_reset(sc);
1740 vr_init(sc);
1741
1742 if (ifp->if_snd.ifq_head != NULL)
1743 vr_start(ifp);
1744
1745 return;
1746 }
1747
1748 /*
1749 * Stop the adapter and free any mbufs allocated to the
1750 * RX and TX lists.
1751 */
1752 static void vr_stop(sc)
1753 struct vr_softc *sc;
1754 {
1755 register int i;
1756 struct ifnet *ifp;
1757
1758 ifp = &sc->vr_ec.ec_if;
1759 ifp->if_timer = 0;
1760
1761 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1762 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1763 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1764 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1765 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1766
1767 /*
1768 * Free data in the RX lists.
1769 */
1770 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1771 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1772 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1773 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1774 }
1775 }
1776 bzero((char *)&sc->vr_ldata->vr_rx_list,
1777 sizeof (sc->vr_ldata->vr_rx_list));
1778
1779 /*
1780 * Free the TX list buffers.
1781 */
1782 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1783 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1784 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1785 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1786 }
1787 }
1788
1789 bzero((char *)&sc->vr_ldata->vr_tx_list,
1790 sizeof (sc->vr_ldata->vr_tx_list));
1791
1792 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1793
1794 return;
1795 }
1796
1797 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
1798 static int vr_probe __P((struct device *, struct cfdata *, void *));
1799 static void vr_attach __P((struct device *, struct device *, void *));
1800 static void vr_shutdown __P((void *));
1801
1802 struct cfattach vr_ca = {
1803 sizeof (struct vr_softc), vr_probe, vr_attach
1804 };
1805
1806 static struct vr_type *
1807 vr_lookup(pa)
1808 struct pci_attach_args *pa;
1809 {
1810 struct vr_type *vrt;
1811
1812 for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1813 if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1814 PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1815 return (vrt);
1816 }
1817 return (NULL);
1818 }
1819
1820 static int
1821 vr_probe(parent, match, aux)
1822 struct device *parent;
1823 struct cfdata *match;
1824 void *aux;
1825 {
1826 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1827
1828 if (vr_lookup(pa) != NULL)
1829 return (1);
1830
1831 return (0);
1832 }
1833
1834 /*
1835 * Stop all chip I/O so that the kernel's probe routines don't
1836 * get confused by errant DMAs when rebooting.
1837 */
1838 static void vr_shutdown(arg)
1839 void *arg;
1840 {
1841 struct vr_softc *sc = (struct vr_softc *)arg;
1842
1843 vr_stop(sc);
1844
1845 return;
1846 }
1847
1848 /*
1849 * Attach the interface. Allocate softc structures, do ifmedia
1850 * setup and ethernet/BPF attach.
1851 */
1852 static void
1853 vr_attach(parent, self, aux)
1854 struct device * const parent;
1855 struct device * const self;
1856 void * const aux;
1857 {
1858 #define PCI_CONF_WRITE(r, v) pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1859 #define PCI_CONF_READ(r) pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1860 struct vr_softc * const sc = (struct vr_softc *) self;
1861 struct pci_attach_args * const pa = (struct pci_attach_args *) aux;
1862 struct vr_type *vrt;
1863 int i;
1864 u_int32_t command;
1865 struct ifnet *ifp;
1866 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1867 unsigned int round;
1868 caddr_t roundptr;
1869 u_char eaddr[ETHER_ADDR_LEN];
1870 struct vr_type *p;
1871 u_int16_t phy_vid, phy_did, phy_sts;
1872
1873 vrt = vr_lookup(pa);
1874 if (vrt == NULL) {
1875 printf("\n");
1876 panic("vr_attach: impossible");
1877 }
1878
1879 printf(": %s Ethernet\n", vrt->vr_name);
1880
1881 /*
1882 * Handle power management nonsense.
1883 */
1884
1885 command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1886 if (command == 0x01) {
1887
1888 command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1889 if (command & VR_PSTATE_MASK) {
1890 u_int32_t iobase, membase, irq;
1891
1892 /* Save important PCI config data. */
1893 iobase = PCI_CONF_READ(VR_PCI_LOIO);
1894 membase = PCI_CONF_READ(VR_PCI_LOMEM);
1895 irq = PCI_CONF_READ(VR_PCI_INTLINE);
1896
1897 /* Reset the power state. */
1898 printf("%s: chip is in D%d power mode "
1899 "-- setting to D0\n",
1900 sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1901 command &= 0xFFFFFFFC;
1902 PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1903
1904 /* Restore PCI config data. */
1905 PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1906 PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1907 PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1908 }
1909 }
1910
1911 /*
1912 * Map control/status registers.
1913 */
1914 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1915 command |= (PCI_COMMAND_IO_ENABLE |
1916 PCI_COMMAND_MEM_ENABLE |
1917 PCI_COMMAND_MASTER_ENABLE);
1918 PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1919 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1920
1921 {
1922 bus_space_tag_t iot, memt;
1923 bus_space_handle_t ioh, memh;
1924 int ioh_valid, memh_valid;
1925 pci_intr_handle_t intrhandle;
1926 const char *intrstr;
1927
1928 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1929 PCI_MAPREG_TYPE_IO, 0,
1930 &iot, &ioh, NULL, NULL) == 0);
1931 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1932 PCI_MAPREG_TYPE_MEM |
1933 PCI_MAPREG_MEM_TYPE_32BIT,
1934 0, &memt, &memh, NULL, NULL) == 0);
1935 #if defined(VR_USEIOSPACE)
1936 if (ioh_valid) {
1937 sc->vr_btag = iot;
1938 sc->vr_bhandle = ioh;
1939 } else if (memh_valid) {
1940 sc->vr_btag = memt;
1941 sc->vr_bhandle = memh;
1942 }
1943 #else
1944 if (memh_valid) {
1945 sc->vr_btag = memt;
1946 sc->vr_bhandle = memh;
1947 } else if (ioh_valid) {
1948 sc->vr_btag = iot;
1949 sc->vr_bhandle = ioh;
1950 }
1951 #endif
1952 else {
1953 printf(": unable to map device registers\n");
1954 return;
1955 }
1956
1957 /* Allocate interrupt */
1958 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1959 pa->pa_intrline, &intrhandle)) {
1960 printf("%s: couldn't map interrupt\n",
1961 sc->vr_dev.dv_xname);
1962 goto fail;
1963 }
1964 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1965 sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1966 (void *)vr_intr, sc);
1967 if (sc->vr_ih == NULL) {
1968 printf("%s: couldn't establish interrupt",
1969 sc->vr_dev.dv_xname);
1970 if (intrstr != NULL)
1971 printf(" at %s", intrstr);
1972 printf("\n");
1973 }
1974 printf("%s: interrupting at %s\n",
1975 sc->vr_dev.dv_xname, intrstr);
1976 }
1977 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1978 if (sc->vr_ats == NULL)
1979 printf("%s: warning: couldn't establish shutdown hook\n",
1980 sc->vr_dev.dv_xname);
1981
1982 /* Reset the adapter. */
1983 vr_reset(sc);
1984
1985 /*
1986 * Get station address. The way the Rhine chips work,
1987 * you're not allowed to directly access the EEPROM once
1988 * they've been programmed a special way. Consequently,
1989 * we need to read the node address from the PAR0 and PAR1
1990 * registers.
1991 */
1992 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1993 DELAY(200);
1994 for (i = 0; i < ETHER_ADDR_LEN; i++)
1995 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1996
1997 /*
1998 * A Rhine chip was detected. Inform the world.
1999 */
2000 printf("%s: Ethernet address: %s\n",
2001 sc->vr_dev.dv_xname, ether_sprintf(eaddr));
2002
2003 bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
2004
2005 sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
2006 M_DEVBUF, M_NOWAIT);
2007 if (sc->vr_ldata_ptr == NULL) {
2008 free(sc, M_DEVBUF);
2009 printf("%s: no memory for list buffers!\n",
2010 sc->vr_dev.dv_xname);
2011 return;
2012 }
2013
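	/*
	 * The descriptor lists were allocated with 8 bytes of slack so
	 * the pointer can be rounded up to an 8-byte boundary below;
	 * their physical addresses are later handed to the chip via
	 * vtophys().
	 */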
2014 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
2015 round = (unsigned long)sc->vr_ldata_ptr & 0xF;
2016 roundptr = sc->vr_ldata_ptr;
2017 for (i = 0; i < 8; i++) {
2018 if (round % 8) {
2019 round++;
2020 roundptr++;
2021 } else
2022 break;
2023 }
2024 sc->vr_ldata = (struct vr_list_data *)roundptr;
2025 bzero(sc->vr_ldata, sizeof (struct vr_list_data));
2026
2027 ifp = &sc->vr_ec.ec_if;
2028 ifp->if_softc = sc;
2029 ifp->if_mtu = ETHERMTU;
2030 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2031 ifp->if_ioctl = vr_ioctl;
2032 ifp->if_output = ether_output;
2033 ifp->if_start = vr_start;
2034 ifp->if_watchdog = vr_watchdog;
2035 ifp->if_baudrate = 10000000;
2036 bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2037
2038 for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
2039 sc->vr_phy_addr = i;
2040 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
2041 DELAY(500);
2042 while (vr_phy_readreg(sc, PHY_BMCR)
2043 & PHY_BMCR_RESET);
2044 if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
2045 break;
2046 }
2047 if (phy_sts) {
2048 phy_vid = vr_phy_readreg(sc, PHY_VENID);
2049 phy_did = vr_phy_readreg(sc, PHY_DEVID);
2050 p = vr_phys;
2051 while (p->vr_vid) {
2052 if (phy_vid == p->vr_vid &&
2053 (phy_did | 0x000F) == p->vr_did) {
2054 sc->vr_pinfo = p;
2055 break;
2056 }
2057 p++;
2058 }
2059 if (sc->vr_pinfo == NULL)
2060 sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
2061 } else {
2062 printf("%s: MII without any phy!\n",
2063 sc->vr_dev.dv_xname);
2064 goto fail;
2065 }
2066
2067 /*
2068 * Do ifmedia setup.
2069 */
2070 ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
2071
2072 vr_getmode_mii(sc);
2073 vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
2074 media = sc->ifmedia.ifm_media;
2075 vr_stop(sc);
2076
2077 ifmedia_set(&sc->ifmedia, media);
2078
2079 /*
2080 * Call MI attach routines.
2081 */
2082 if_attach(ifp);
2083 ether_ifattach(ifp, sc->vr_enaddr);
2084
2085 #if NBPFILTER > 0
2086 bpfattach(&sc->vr_ec.ec_if.if_bpf,
2087 ifp, DLT_EN10MB, sizeof (struct ether_header));
2088 #endif
2089
2095 fail:
2096 return;
2097 }
2098