1 /* $NetBSD: if_vr.c,v 1.7 1999/02/02 00:29:17 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
35 */
36
37 /*
38 * VIA Rhine fast ethernet PCI NIC driver
39 *
40 * Supports various network adapters based on the VIA Rhine
41 * and Rhine II PCI controllers, including the D-Link DFE530TX.
42 * Datasheets are available at http://www.via.com.tw.
43 *
44 * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
50 * The VIA Rhine controllers are similar in some respects to the
51 * DEC tulip chips, except less complicated. The controller
52 * uses an MII bus and an external physical layer interface. The
53 * receiver has a one entry perfect filter and a 64-bit hash table
54 * multicast filter. Transmit and receive descriptors are similar
55 * to the tulip.
56 *
57 * The Rhine has a serious flaw in its transmit DMA mechanism:
58 * transmit buffers must be longword aligned. Unfortunately,
59 * FreeBSD doesn't guarantee that mbufs will be filled in starting
60 * at longword boundaries, so we have to do a buffer copy before
61 * transmission.
62 */
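/*
 * See vr_encap() below: each outgoing mbuf chain is copied into a
 * freshly allocated (and therefore longword aligned) mbuf or cluster,
 * and short frames are padded out to VR_MIN_FRAMELEN by hand, since
 * the chip does not auto-pad them either.
 */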
63
64 #include "opt_inet.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/socket.h>
73 #include <sys/device.h>
74
75 #include <net/if.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/if_media.h>
79 #include <net/if_ether.h>
80
81 #if defined(INET)
82 #include <netinet/in.h>
83 #include <netinet/if_inarp.h>
84 #endif
85
86 #include "bpfilter.h"
87 #if NBPFILTER > 0
88 #include <net/bpf.h>
89 #endif
90
91 #include <vm/vm.h> /* for vtophys */
92
93 #include <machine/bus.h>
94 #include <machine/intr.h>
95
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/if_vrreg.h>
99
100 #if defined(__NetBSD__) && defined(__alpha__)
101 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
102 #undef vtophys
103 #define vtophys(va) alpha_XXX_dmamap((vaddr_t)(va))
104 #endif
105
106 #define VR_USEIOSPACE
107
108 /* #define VR_BACKGROUND_AUTONEG */
109
110 #define ETHER_CRC_LEN 4 /* XXX Should be in a common header. */
111
112 /*
113 * Various supported device vendors/types and their names.
114 */
115 static struct vr_type {
116 pci_vendor_id_t vr_vid;
117 pci_product_id_t vr_did;
118 const char *vr_name;
119 } vr_devs[] = {
120 { VIA_VENDORID, VIA_DEVICEID_RHINE,
121 "VIA VT3043 Rhine I 10/100BaseTX" },
122 { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
123 "VIA VT86C100A Rhine II 10/100BaseTX" },
124 { 0, 0, NULL }
125 };
126
127 /*
128 * Various supported PHY vendors/types and their names. Note that
129 * this driver will work with pretty much any MII-compliant PHY,
130 * so failure to positively identify the chip is not a fatal error.
131 */
132
133 static struct vr_type vr_phys[] = {
134 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
135 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
136 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
137 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
138 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
139 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
140 { 0, 0, "<MII-compliant physical interface>" }
141 };
142
143 struct vr_mii_frame {
144 u_int8_t mii_stdelim;
145 u_int8_t mii_opcode;
146 u_int8_t mii_phyaddr;
147 u_int8_t mii_regaddr;
148 u_int8_t mii_turnaround;
149 u_int16_t mii_data;
150 };
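/*
 * The fields above mirror the IEEE 802.3 clause 22 management frame
 * that vr_mii_readreg()/vr_mii_writereg() bit-bang through the MIICMD
 * register: a preamble of 32 ones (vr_mii_sync()), a 2-bit start
 * delimiter, a 2-bit opcode (read or write), 5-bit PHY and register
 * addresses, a 2-bit turnaround and 16 data bits; vr_mii_send()
 * shifts each outgoing field out MSB first.
 */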
151
152 /*
153 * MII constants
154 */
155 #define VR_MII_STARTDELIM 0x01
156 #define VR_MII_READOP 0x02
157 #define VR_MII_WRITEOP 0x01
158 #define VR_MII_TURNAROUND 0x02
159
160 #define VR_FLAG_FORCEDELAY 1
161 #define VR_FLAG_SCHEDDELAY 2
162 #define VR_FLAG_DELAYTIMEO 3
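/*
 * How vr_autoneg_mii() is being invoked: VR_FLAG_FORCEDELAY busy-waits
 * for the negotiation to finish (used only at attach time),
 * VR_FLAG_SCHEDDELAY starts a session and lets the watchdog timer pick
 * up the result, and VR_FLAG_DELAYTIMEO is what vr_watchdog() passes
 * when that timer expires.
 */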
163
164 struct vr_list_data {
165 struct vr_desc vr_rx_list[VR_RX_LIST_CNT];
166 struct vr_desc vr_tx_list[VR_TX_LIST_CNT];
167 };
168
169 struct vr_chain {
170 struct vr_desc *vr_ptr;
171 struct mbuf *vr_mbuf;
172 struct vr_chain *vr_nextdesc;
173 };
174
175 struct vr_chain_onefrag {
176 struct vr_desc *vr_ptr;
177 struct mbuf *vr_mbuf;
178 struct vr_chain_onefrag *vr_nextdesc;
179 };
180
181 struct vr_chain_data {
182 struct vr_chain_onefrag vr_rx_chain[VR_RX_LIST_CNT];
183 struct vr_chain vr_tx_chain[VR_TX_LIST_CNT];
184
185 struct vr_chain_onefrag *vr_rx_head;
186
187 struct vr_chain *vr_tx_head;
188 struct vr_chain *vr_tx_tail;
189 struct vr_chain *vr_tx_free;
190 };
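/*
 * Software view of the descriptor rings: vr_rx_head is the next
 * receive descriptor we expect the chip to complete, vr_tx_head and
 * vr_tx_tail bracket the frames currently queued for transmission,
 * and vr_tx_free is the next unused transmit slot.
 */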
191
192 struct vr_softc {
193 struct device vr_dev;
194 void *vr_ih;
195 void *vr_ats;
196 bus_space_tag_t vr_bustag;
197 bus_space_handle_t vr_bushandle;
198 pci_chipset_tag_t vr_pc;
199 struct ethercom vr_ec;
200 u_int8_t vr_enaddr[ETHER_ADDR_LEN];
201 struct ifmedia ifmedia; /* media info */
202 bus_space_handle_t vr_bhandle; /* bus space handle */
203 bus_space_tag_t vr_btag; /* bus space tag */
204 struct vr_type *vr_info; /* Rhine adapter info */
205 struct vr_type *vr_pinfo; /* phy info */
206 u_int8_t vr_unit; /* interface number */
207 u_int8_t vr_type;
208 u_int8_t vr_phy_addr; /* PHY address */
209 u_int8_t vr_tx_pend; /* TX pending */
210 u_int8_t vr_want_auto;
211 u_int8_t vr_autoneg;
212 caddr_t vr_ldata_ptr;
213 struct vr_list_data *vr_ldata;
214 struct vr_chain_data vr_cdata;
215 };
216
217 /*
218 * register space access macros
219 */
220 #define CSR_WRITE_4(sc, reg, val) \
221 bus_space_write_4(sc->vr_btag, sc->vr_bhandle, reg, val)
222 #define CSR_WRITE_2(sc, reg, val) \
223 bus_space_write_2(sc->vr_btag, sc->vr_bhandle, reg, val)
224 #define CSR_WRITE_1(sc, reg, val) \
225 bus_space_write_1(sc->vr_btag, sc->vr_bhandle, reg, val)
226
227 #define CSR_READ_4(sc, reg) \
228 bus_space_read_4(sc->vr_btag, sc->vr_bhandle, reg)
229 #define CSR_READ_2(sc, reg) \
230 bus_space_read_2(sc->vr_btag, sc->vr_bhandle, reg)
231 #define CSR_READ_1(sc, reg) \
232 bus_space_read_1(sc->vr_btag, sc->vr_bhandle, reg)
233
234 #define VR_TIMEOUT 1000
235
236 static int vr_newbuf __P((struct vr_softc *,
237 struct vr_chain_onefrag *));
238 static int vr_encap __P((struct vr_softc *, struct vr_chain *,
239 struct mbuf *));
240
241 static void vr_rxeof __P((struct vr_softc *));
242 static void vr_rxeoc __P((struct vr_softc *));
243 static void vr_txeof __P((struct vr_softc *));
244 static void vr_txeoc __P((struct vr_softc *));
245 static void vr_intr __P((void *));
246 static void vr_start __P((struct ifnet *));
247 static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
248 static void vr_init __P((void *));
249 static void vr_stop __P((struct vr_softc *));
250 static void vr_watchdog __P((struct ifnet *));
251 static int vr_ifmedia_upd __P((struct ifnet *));
252 static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
253
254 static void vr_mii_sync __P((struct vr_softc *));
255 static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
256 static int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *));
257 static int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *));
258 static u_int16_t vr_phy_readreg __P((struct vr_softc *, int));
259 static void vr_phy_writereg __P((struct vr_softc *, u_int16_t, u_int16_t));
260
261 static void vr_autoneg_xmit __P((struct vr_softc *));
262 static void vr_autoneg_mii __P((struct vr_softc *, int, int));
263 static void vr_setmode_mii __P((struct vr_softc *, int));
264 static void vr_getmode_mii __P((struct vr_softc *));
265 static void vr_setcfg __P((struct vr_softc *, u_int16_t));
266 static u_int8_t vr_calchash __P((u_int8_t *));
267 static void vr_setmulti __P((struct vr_softc *));
268 static void vr_reset __P((struct vr_softc *));
269 static int vr_list_rx_init __P((struct vr_softc *));
270 static int vr_list_tx_init __P((struct vr_softc *));
271
272 #define VR_SETBIT(sc, reg, x) \
273 CSR_WRITE_1(sc, reg, \
274 CSR_READ_1(sc, reg) | x)
275
276 #define VR_CLRBIT(sc, reg, x) \
277 CSR_WRITE_1(sc, reg, \
278 CSR_READ_1(sc, reg) & ~x)
279
280 #define VR_SETBIT16(sc, reg, x) \
281 CSR_WRITE_2(sc, reg, \
282 CSR_READ_2(sc, reg) | x)
283
284 #define VR_CLRBIT16(sc, reg, x) \
285 CSR_WRITE_2(sc, reg, \
286 CSR_READ_2(sc, reg) & ~x)
287
288 #define VR_SETBIT32(sc, reg, x) \
289 CSR_WRITE_4(sc, reg, \
290 CSR_READ_4(sc, reg) | x)
291
292 #define VR_CLRBIT32(sc, reg, x) \
293 CSR_WRITE_4(sc, reg, \
294 CSR_READ_4(sc, reg) & ~x)
295
296 #define SIO_SET(x) \
297 CSR_WRITE_1(sc, VR_MIICMD, \
298 CSR_READ_1(sc, VR_MIICMD) | x)
299
300 #define SIO_CLR(x) \
301 CSR_WRITE_1(sc, VR_MIICMD, \
302 CSR_READ_1(sc, VR_MIICMD) & ~x)
303
304 /*
305 * Sync the PHYs by setting data bit and strobing the clock 32 times.
306 */
307 static void vr_mii_sync(sc)
308 struct vr_softc *sc;
309 {
310 register int i;
311
312 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
313
314 for (i = 0; i < 32; i++) {
315 SIO_SET(VR_MIICMD_CLK);
316 DELAY(1);
317 SIO_CLR(VR_MIICMD_CLK);
318 DELAY(1);
319 }
320
321 return;
322 }
323
324 /*
325 * Clock a series of bits through the MII.
326 */
327 static void vr_mii_send(sc, bits, cnt)
328 struct vr_softc *sc;
329 u_int32_t bits;
330 int cnt;
331 {
332 int i;
333
334 SIO_CLR(VR_MIICMD_CLK);
335
336 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
337 if (bits & i) {
338 SIO_SET(VR_MIICMD_DATAIN);
339 } else {
340 SIO_CLR(VR_MIICMD_DATAIN);
341 }
342 DELAY(1);
343 SIO_CLR(VR_MIICMD_CLK);
344 DELAY(1);
345 SIO_SET(VR_MIICMD_CLK);
346 }
347 }
348
349 /*
350 * Read a PHY register through the MII.
351 */
352 static int vr_mii_readreg(sc, frame)
353 struct vr_softc *sc;
354 struct vr_mii_frame *frame;
355
356 {
357 int i, ack, s;
358
359 s = splimp();
360
361 /*
362 * Set up frame for RX.
363 */
364 frame->mii_stdelim = VR_MII_STARTDELIM;
365 frame->mii_opcode = VR_MII_READOP;
366 frame->mii_turnaround = 0;
367 frame->mii_data = 0;
368
369 CSR_WRITE_1(sc, VR_MIICMD, 0);
370 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
371
372 /*
373 * Turn on data xmit.
374 */
375 SIO_SET(VR_MIICMD_DIR);
376
377 vr_mii_sync(sc);
378
379 /*
380 * Send command/address info.
381 */
382 vr_mii_send(sc, frame->mii_stdelim, 2);
383 vr_mii_send(sc, frame->mii_opcode, 2);
384 vr_mii_send(sc, frame->mii_phyaddr, 5);
385 vr_mii_send(sc, frame->mii_regaddr, 5);
386
387 /* Idle bit */
388 SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
389 DELAY(1);
390 SIO_SET(VR_MIICMD_CLK);
391 DELAY(1);
392
393 /* Turn off xmit. */
394 SIO_CLR(VR_MIICMD_DIR);
395
396 /* Check for ack */
397 SIO_CLR(VR_MIICMD_CLK);
398 DELAY(1);
399 SIO_SET(VR_MIICMD_CLK);
400 DELAY(1);
401 ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
402
403 /*
404 * Now try reading data bits. If the ack failed, we still
405 * need to clock through 16 cycles to keep the PHY(s) in sync.
406 */
407 if (ack) {
408 for (i = 0; i < 16; i++) {
409 SIO_CLR(VR_MIICMD_CLK);
410 DELAY(1);
411 SIO_SET(VR_MIICMD_CLK);
412 DELAY(1);
413 }
414 goto fail;
415 }
416
417 for (i = 0x8000; i; i >>= 1) {
418 SIO_CLR(VR_MIICMD_CLK);
419 DELAY(1);
420 if (!ack) {
421 if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
422 frame->mii_data |= i;
423 DELAY(1);
424 }
425 SIO_SET(VR_MIICMD_CLK);
426 DELAY(1);
427 }
428
429 fail:
430
431 SIO_CLR(VR_MIICMD_CLK);
432 DELAY(1);
433 SIO_SET(VR_MIICMD_CLK);
434 DELAY(1);
435
436 splx(s);
437
438 if (ack)
439 return (1);
440 return (0);
441 }
442
443 /*
444 * Write to a PHY register through the MII.
445 */
446 static int vr_mii_writereg(sc, frame)
447 struct vr_softc *sc;
448 struct vr_mii_frame *frame;
449 {
450 int s;
451
452 s = splimp();
453
454 CSR_WRITE_1(sc, VR_MIICMD, 0);
455 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
456
457 /*
458 * Set up frame for TX.
459 */
460
461 frame->mii_stdelim = VR_MII_STARTDELIM;
462 frame->mii_opcode = VR_MII_WRITEOP;
463 frame->mii_turnaround = VR_MII_TURNAROUND;
464
465 /*
466 * Turn on data output.
467 */
468 SIO_SET(VR_MIICMD_DIR);
469
470 vr_mii_sync(sc);
471
472 vr_mii_send(sc, frame->mii_stdelim, 2);
473 vr_mii_send(sc, frame->mii_opcode, 2);
474 vr_mii_send(sc, frame->mii_phyaddr, 5);
475 vr_mii_send(sc, frame->mii_regaddr, 5);
476 vr_mii_send(sc, frame->mii_turnaround, 2);
477 vr_mii_send(sc, frame->mii_data, 16);
478
479 /* Idle bit. */
480 SIO_SET(VR_MIICMD_CLK);
481 DELAY(1);
482 SIO_CLR(VR_MIICMD_CLK);
483 DELAY(1);
484
485 /*
486 * Turn off xmit.
487 */
488 SIO_CLR(VR_MIICMD_DIR);
489
490 splx(s);
491
492 return (0);
493 }
494
495 static u_int16_t vr_phy_readreg(sc, reg)
496 struct vr_softc *sc;
497 int reg;
498 {
499 struct vr_mii_frame frame;
500
501 bzero((char *)&frame, sizeof (frame));
502
503 frame.mii_phyaddr = sc->vr_phy_addr;
504 frame.mii_regaddr = reg;
505 vr_mii_readreg(sc, &frame);
506
507 return (frame.mii_data);
508 }
509
510 static void vr_phy_writereg(sc, reg, data)
511 struct vr_softc *sc;
512 u_int16_t reg;
513 u_int16_t data;
514 {
515 struct vr_mii_frame frame;
516
517 bzero((char *)&frame, sizeof (frame));
518
519 frame.mii_phyaddr = sc->vr_phy_addr;
520 frame.mii_regaddr = reg;
521 frame.mii_data = data;
522
523 vr_mii_writereg(sc, &frame);
524
525 return;
526 }
527
528 /*
529 * Calculate CRC of a multicast group address, return the upper 6 bits.
530 */
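/*
 * The bit-serial loop below implements the standard Ethernet CRC-32
 * (polynomial 0x04C11DB7), feeding each address byte in LSB first;
 * the chip uses the six most significant CRC bits to select one of
 * the 64 hash filter bits in the MAR0/MAR1 registers.
 */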
531 static u_int8_t vr_calchash(addr)
532 u_int8_t *addr;
533 {
534 u_int32_t crc, carry;
535 int i, j;
536 u_int8_t c;
537
538 /* Compute CRC for the address value. */
539 crc = 0xFFFFFFFF; /* initial value */
540
541 for (i = 0; i < 6; i++) {
542 c = *(addr + i);
543 for (j = 0; j < 8; j++) {
544 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
545 crc <<= 1;
546 c >>= 1;
547 if (carry)
548 crc = (crc ^ 0x04c11db6) | carry;
549 }
550 }
551
552 /* return the filter bit position */
553 return ((crc >> 26) & 0x0000003F);
554 }
555
556 /*
557 * Program the 64-bit multicast hash filter.
558 */
559 static void vr_setmulti(sc)
560 struct vr_softc *sc;
561 {
562 struct ifnet *ifp;
563 int h = 0;
564 u_int32_t hashes[2] = { 0, 0 };
565 struct ether_multistep step;
566 struct ether_multi *enm;
567 int mcnt = 0;
568 u_int8_t rxfilt;
569
570 ifp = &sc->vr_ec.ec_if;
571
572 rxfilt = CSR_READ_1(sc, VR_RXCFG);
573
574 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
575 rxfilt |= VR_RXCFG_RX_MULTI;
576 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
577 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
578 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
579 return;
580 }
581
582 /* first, zot all the existing hash bits */
583 CSR_WRITE_4(sc, VR_MAR0, 0);
584 CSR_WRITE_4(sc, VR_MAR1, 0);
585
586 /* now program new ones */
587 ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
588 while (enm != NULL) {
589 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
590 /*
 * A range of multicast addresses can't be matched by the
 * hash filter, so skip it; advance the enumeration here,
 * otherwise this loop would never terminate.
 */
ETHER_NEXT_MULTI(step, enm);
continue;
}
591
592 h = vr_calchash(enm->enm_addrlo);
593
594 if (h < 32)
595 hashes[0] |= (1 << h);
596 else
597 hashes[1] |= (1 << (h - 32));
598 ETHER_NEXT_MULTI(step, enm);
599 mcnt++;
600 }
601
602 if (mcnt)
603 rxfilt |= VR_RXCFG_RX_MULTI;
604 else
605 rxfilt &= ~VR_RXCFG_RX_MULTI;
606
607 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
608 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
609 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
610
611 return;
612 }
613
614 /*
615 * Initiate an autonegotiation session.
616 */
617 static void vr_autoneg_xmit(sc)
618 struct vr_softc *sc;
619 {
620 u_int16_t phy_sts;
621
622 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
623 DELAY(500);
624 while (vr_phy_readreg(sc, PHY_BMCR)
625 & PHY_BMCR_RESET);
626
627 phy_sts = vr_phy_readreg(sc, PHY_BMCR);
628 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
629 vr_phy_writereg(sc, PHY_BMCR, phy_sts);
630
631 return;
632 }
633
634 /*
635 * Invoke autonegotiation on a PHY.
636 */
637 static void vr_autoneg_mii(sc, flag, verbose)
638 struct vr_softc *sc;
639 int flag;
640 int verbose;
641 {
642 u_int16_t phy_sts = 0, media, advert, ability;
643 struct ifnet *ifp;
644 struct ifmedia *ifm;
645
646 ifm = &sc->ifmedia;
647 ifp = &sc->vr_ec.ec_if;
648
649 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
650
651 /*
652 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
653 * bit cleared in the status register, but has the 'autoneg enabled'
654 * bit set in the control register. This is a contradiction, and
655 * I'm not sure how to handle it. If you want to force an attempt
656 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
657 * and see what happens.
658 */
659 #ifndef FORCE_AUTONEG_TFOUR
660 /*
661 * First, see if autoneg is supported. If not, there's
662 * no point in continuing.
663 */
664 phy_sts = vr_phy_readreg(sc, PHY_BMSR);
665 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
666 if (verbose)
667 printf("%s: autonegotiation not supported\n",
668 sc->vr_dev.dv_xname);
669 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
670 return;
671 }
672 #endif
673
674 switch (flag) {
675 case VR_FLAG_FORCEDELAY:
676 /*
677 * XXX Never use this option anywhere but in the probe
678 * routine: making the kernel stop dead in its tracks
679 * for three whole seconds after we've gone multi-user
680 * is really bad manners.
681 */
682 vr_autoneg_xmit(sc);
683 DELAY(5000000);
684 break;
685 case VR_FLAG_SCHEDDELAY:
686 /*
687 * Wait for the transmitter to go idle before starting
688 * an autoneg session, otherwise vr_start() may clobber
689 * our timeout, and we don't want to allow transmission
690 * during an autoneg session since that can screw it up.
691 */
692 if (sc->vr_cdata.vr_tx_head != NULL) {
693 sc->vr_want_auto = 1;
694 return;
695 }
696 vr_autoneg_xmit(sc);
697 ifp->if_timer = 5;
698 sc->vr_autoneg = 1;
699 sc->vr_want_auto = 0;
700 return;
701 break;
702 case VR_FLAG_DELAYTIMEO:
703 ifp->if_timer = 0;
704 sc->vr_autoneg = 0;
705 break;
706 default:
707 printf("%s: invalid autoneg flag: %d\n",
708 sc->vr_dev.dv_xname, flag);
709 return;
710 }
711
712 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
713 if (verbose)
714 printf("%s: autoneg complete, ",
715 sc->vr_dev.dv_xname);
716 phy_sts = vr_phy_readreg(sc, PHY_BMSR);
717 } else {
718 if (verbose)
719 printf("%s: autoneg not complete, ",
720 sc->vr_dev.dv_xname);
721 }
722
723 media = vr_phy_readreg(sc, PHY_BMCR);
724
725 /* Link is good. Report modes and set duplex mode. */
726 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
727 if (verbose)
728 printf("link status good ");
729 advert = vr_phy_readreg(sc, PHY_ANAR);
730 ability = vr_phy_readreg(sc, PHY_LPAR);
731
732 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
733 ifm->ifm_media = IFM_ETHER|IFM_100_T4;
734 media |= PHY_BMCR_SPEEDSEL;
735 media &= ~PHY_BMCR_DUPLEX;
736 printf("(100baseT4)\n");
737 } else if (advert & PHY_ANAR_100BTXFULL &&
738 ability & PHY_ANAR_100BTXFULL) {
739 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
740 media |= PHY_BMCR_SPEEDSEL;
741 media |= PHY_BMCR_DUPLEX;
742 printf("(full-duplex, 100Mbps)\n");
743 } else if (advert & PHY_ANAR_100BTXHALF &&
744 ability & PHY_ANAR_100BTXHALF) {
745 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
746 media |= PHY_BMCR_SPEEDSEL;
747 media &= ~PHY_BMCR_DUPLEX;
748 printf("(half-duplex, 100Mbps)\n");
749 } else if (advert & PHY_ANAR_10BTFULL &&
750 ability & PHY_ANAR_10BTFULL) {
751 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
752 media &= ~PHY_BMCR_SPEEDSEL;
753 media |= PHY_BMCR_DUPLEX;
754 printf("(full-duplex, 10Mbps)\n");
755 } else {
756 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
757 media &= ~PHY_BMCR_SPEEDSEL;
758 media &= ~PHY_BMCR_DUPLEX;
759 printf("(half-duplex, 10Mbps)\n");
760 }
761
762 media &= ~PHY_BMCR_AUTONEGENBL;
763
764 /* Set ASIC's duplex mode to match the PHY. */
765 vr_setcfg(sc, media);
766 vr_phy_writereg(sc, PHY_BMCR, media);
767 } else {
768 if (verbose)
769 printf("no carrier\n");
770 }
771
772 vr_init(sc);
773
774 if (sc->vr_tx_pend) {
775 sc->vr_autoneg = 0;
776 sc->vr_tx_pend = 0;
777 vr_start(ifp);
778 }
779
780 return;
781 }
782
783 static void vr_getmode_mii(sc)
784 struct vr_softc *sc;
785 {
786 u_int16_t bmsr;
787 struct ifnet *ifp;
788
789 ifp = &sc->vr_ec.ec_if;
790
791 bmsr = vr_phy_readreg(sc, PHY_BMSR);
792
793 /* fallback */
794 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
795
796 if (bmsr & PHY_BMSR_10BTHALF) {
797 ifmedia_add(&sc->ifmedia,
798 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
799 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
800 }
801
802 if (bmsr & PHY_BMSR_10BTFULL) {
803 ifmedia_add(&sc->ifmedia,
804 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
805 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
806 }
807
808 if (bmsr & PHY_BMSR_100BTXHALF) {
809 ifp->if_baudrate = 100000000;
810 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
811 ifmedia_add(&sc->ifmedia,
812 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
813 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
814 }
815
816 if (bmsr & PHY_BMSR_100BTXFULL) {
817 ifp->if_baudrate = 100000000;
818 ifmedia_add(&sc->ifmedia,
819 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
820 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
821 }
822
823 /* Some also support 100BaseT4. */
824 if (bmsr & PHY_BMSR_100BT4) {
825 ifp->if_baudrate = 100000000;
826 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
827 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
828 #ifdef FORCE_AUTONEG_TFOUR
829 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
830 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
831 #endif
832 }
833
834 if (bmsr & PHY_BMSR_CANAUTONEG) {
835 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
836 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
837 }
838
839 return;
840 }
841
842 /*
843 * Set speed and duplex mode.
844 */
845 static void vr_setmode_mii(sc, media)
846 struct vr_softc *sc;
847 int media;
848 {
849 u_int16_t bmcr;
850 struct ifnet *ifp;
851
852 ifp = &sc->vr_ec.ec_if;
853
854 /*
855 * If an autoneg session is in progress, stop it.
856 */
857 if (sc->vr_autoneg) {
858 printf("%s: canceling autoneg session\n",
859 sc->vr_dev.dv_xname);
860 ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
861 bmcr = vr_phy_readreg(sc, PHY_BMCR);
862 bmcr &= ~PHY_BMCR_AUTONEGENBL;
863 vr_phy_writereg(sc, PHY_BMCR, bmcr);
864 }
865
866 printf("%s: selecting MII, ", sc->vr_dev.dv_xname);
867
868 bmcr = vr_phy_readreg(sc, PHY_BMCR);
869
870 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
871 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
872
873 if (IFM_SUBTYPE(media) == IFM_100_T4) {
874 printf("100Mbps/T4, half-duplex\n");
875 bmcr |= PHY_BMCR_SPEEDSEL;
876 bmcr &= ~PHY_BMCR_DUPLEX;
877 }
878
879 if (IFM_SUBTYPE(media) == IFM_100_TX) {
880 printf("100Mbps, ");
881 bmcr |= PHY_BMCR_SPEEDSEL;
882 }
883
884 if (IFM_SUBTYPE(media) == IFM_10_T) {
885 printf("10Mbps, ");
886 bmcr &= ~PHY_BMCR_SPEEDSEL;
887 }
888
889 if ((media & IFM_GMASK) == IFM_FDX) {
890 printf("full duplex\n");
891 bmcr |= PHY_BMCR_DUPLEX;
892 } else {
893 printf("half duplex\n");
894 bmcr &= ~PHY_BMCR_DUPLEX;
895 }
896
897 vr_setcfg(sc, bmcr);
898 vr_phy_writereg(sc, PHY_BMCR, bmcr);
899
900 return;
901 }
902
903 /*
904 * In order to fiddle with the
905 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
906 * first have to put the transmit and/or receive logic in the idle state.
907 */
908 static void vr_setcfg(sc, bmcr)
909 struct vr_softc *sc;
910 u_int16_t bmcr;
911 {
912 int restart = 0;
913
914 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
915 restart = 1;
916 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
917 }
918
919 if (bmcr & PHY_BMCR_DUPLEX)
920 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
921 else
922 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
923
924 if (restart)
925 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
926
927 return;
928 }
929
930 static void vr_reset(sc)
931 struct vr_softc *sc;
932 {
933 register int i;
934
935 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
936
937 for (i = 0; i < VR_TIMEOUT; i++) {
938 DELAY(10);
939 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
940 break;
941 }
942 if (i == VR_TIMEOUT)
943 printf("%s: reset never completed!\n",
944 sc->vr_dev.dv_xname);
945
946 /* Wait a little while for the chip to get its brains in order. */
947 DELAY(1000);
948
949 return;
950 }
951
952 /*
953 * Initialize the transmit descriptors.
954 */
955 static int vr_list_tx_init(sc)
956 struct vr_softc *sc;
957 {
958 struct vr_chain_data *cd;
959 struct vr_list_data *ld;
960 int i;
961
962 cd = &sc->vr_cdata;
963 ld = sc->vr_ldata;
964 for (i = 0; i < VR_TX_LIST_CNT; i++) {
965 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
966 if (i == (VR_TX_LIST_CNT - 1))
967 cd->vr_tx_chain[i].vr_nextdesc =
968 &cd->vr_tx_chain[0];
969 else
970 cd->vr_tx_chain[i].vr_nextdesc =
971 &cd->vr_tx_chain[i + 1];
972 }
973
974 cd->vr_tx_free = &cd->vr_tx_chain[0];
975 cd->vr_tx_tail = cd->vr_tx_head = NULL;
976
977 return (0);
978 }
979
980
981 /*
982 * Initialize the RX descriptors and allocate mbufs for them. Note that
983 * we arrange the descriptors in a closed ring, so that the last descriptor
984 * points back to the first.
985 */
986 static int vr_list_rx_init(sc)
987 struct vr_softc *sc;
988 {
989 struct vr_chain_data *cd;
990 struct vr_list_data *ld;
991 int i;
992
993 cd = &sc->vr_cdata;
994 ld = sc->vr_ldata;
995
996 for (i = 0; i < VR_RX_LIST_CNT; i++) {
997 cd->vr_rx_chain[i].vr_ptr =
998 (struct vr_desc *)&ld->vr_rx_list[i];
999 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
1000 return (ENOBUFS);
1001 if (i == (VR_RX_LIST_CNT - 1)) {
1002 cd->vr_rx_chain[i].vr_nextdesc =
1003 &cd->vr_rx_chain[0];
1004 ld->vr_rx_list[i].vr_next =
1005 vtophys(&ld->vr_rx_list[0]);
1006 } else {
1007 cd->vr_rx_chain[i].vr_nextdesc =
1008 &cd->vr_rx_chain[i + 1];
1009 ld->vr_rx_list[i].vr_next =
1010 vtophys(&ld->vr_rx_list[i + 1]);
1011 }
1012 }
1013
1014 cd->vr_rx_head = &cd->vr_rx_chain[0];
1015
1016 return (0);
1017 }
1018
1019 /*
1020 * Initialize an RX descriptor and attach an MBUF cluster.
1021 * Note: the length fields are only 11 bits wide, which means the
1022 * largest size we can specify is 2047. This is important because
1023 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1024 * overflow the field and make a mess.
1025 */
1026 static int vr_newbuf(sc, c)
1027 struct vr_softc *sc;
1028 struct vr_chain_onefrag *c;
1029 {
1030 struct mbuf *m_new = NULL;
1031
1032 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1033 if (m_new == NULL) {
1034 printf("%s: no memory for rx list -- packet dropped!\n",
1035 sc->vr_dev.dv_xname);
1036 return (ENOBUFS);
1037 }
1038
1039 MCLGET(m_new, M_DONTWAIT);
1040 if (!(m_new->m_flags & M_EXT)) {
1041 printf("%s: no memory for rx list -- packet dropped!\n",
1042 sc->vr_dev.dv_xname);
1043 m_freem(m_new);
1044 return (ENOBUFS);
1045 }
1046
1047 c->vr_mbuf = m_new;
1048 c->vr_ptr->vr_status = VR_RXSTAT;
1049 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1050 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1051
1052 return (0);
1053 }
1054
1055 /*
1056 * A frame has been uploaded: pass the resulting mbuf chain up to
1057 * the higher level protocols.
1058 */
1059 static void vr_rxeof(sc)
1060 struct vr_softc *sc;
1061 {
1062 struct ether_header *eh;
1063 struct mbuf *m;
1064 struct ifnet *ifp;
1065 struct vr_chain_onefrag *cur_rx;
1066 int total_len = 0;
1067 u_int32_t rxstat;
1068
1069 ifp = &sc->vr_ec.ec_if;
1070
1071 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
1072 VR_RXSTAT_OWN)) {
1073 cur_rx = sc->vr_cdata.vr_rx_head;
1074 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
1075
1076 /*
1077 * If an error occurs, update stats, clear the
1078 * status word and leave the mbuf cluster in place:
1079 * it should simply get re-used next time this descriptor
1080 * comes up in the ring.
1081 */
1082 if (rxstat & VR_RXSTAT_RXERR) {
1083 ifp->if_ierrors++;
1084 printf("%s: rx error: ", sc->vr_dev.dv_xname);
1085 switch (rxstat & 0x000000FF) {
1086 case VR_RXSTAT_CRCERR:
1087 printf("crc error\n");
1088 break;
1089 case VR_RXSTAT_FRAMEALIGNERR:
1090 printf("frame alignment error\n");
1091 break;
1092 case VR_RXSTAT_FIFOOFLOW:
1093 printf("FIFO overflow\n");
1094 break;
1095 case VR_RXSTAT_GIANT:
1096 printf("received giant packet\n");
1097 break;
1098 case VR_RXSTAT_RUNT:
1099 printf("received runt packet\n");
1100 break;
1101 case VR_RXSTAT_BUSERR:
1102 printf("system bus error\n");
1103 break;
1104 case VR_RXSTAT_BUFFERR:
1105 printf("rx buffer error\n");
1106 break;
1107 default:
1108 printf("unknown rx error\n");
1109 break;
1110 }
1111 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1112 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
1113 continue;
1114 }
1115
1116 /* No errors; receive the packet. */
1117 m = cur_rx->vr_mbuf;
1118 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1119
1120 /*
1121 * XXX The VIA Rhine chip includes the CRC with every
1122 * received frame, and there's no way to turn this
1123 * behavior off (at least, I can't find anything in
1124 * the manual that explains how to do it) so we have
1125 * to trim off the CRC manually.
1126 */
1127 total_len -= ETHER_CRC_LEN;
1128
1129 /*
1130 * Try to conjure up a new mbuf cluster. If that
1131 * fails, it means we have an out of memory condition and
1132 * should leave the buffer in place and continue. This will
1133 * result in a lost packet, but there's little else we
1134 * can do in this situation.
1135 */
1136 if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
1137 ifp->if_ierrors++;
1138 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1139 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
1140 continue;
1141 }
1142
1143 ifp->if_ipackets++;
1144 eh = mtod(m, struct ether_header *);
1145 m->m_pkthdr.rcvif = ifp;
1146 m->m_pkthdr.len = m->m_len = total_len;
1147 #if NBPFILTER > 0
1148 /*
1149 * Handle BPF listeners. Let the BPF user see the packet, but
1150 * don't pass it up to the ether_input() layer unless it's
1151 * a broadcast packet, multicast packet, matches our ethernet
1152 * address or the interface is in promiscuous mode.
1153 */
1154 if (ifp->if_bpf) {
1155 bpf_mtap(ifp->if_bpf, m);
1156 if (ifp->if_flags & IFF_PROMISC &&
1157 (memcmp(eh->ether_dhost, sc->vr_enaddr,
1158 ETHER_ADDR_LEN) &&
1159 (eh->ether_dhost[0] & 1) == 0)) {
1160 m_freem(m);
1161 continue;
1162 }
1163 }
1164 #endif
1165 /* Remove header from mbuf and pass it on. */
1166 m_adj(m, sizeof (struct ether_header));
1167 ether_input(ifp, eh, m);
1168 }
1169
1170 return;
1171 }
1172
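/*
 * RX 'end of channel'/error recovery: drain whatever is already in
 * the ring, then point the chip back at the current head descriptor
 * and restart the receiver.
 */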
1173 void vr_rxeoc(sc)
1174 struct vr_softc *sc;
1175 {
1176
1177 vr_rxeof(sc);
1178 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1179 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1180 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1181 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1182
1183 return;
1184 }
1185
1186 /*
1187 * A frame was downloaded to the chip. It's safe for us to clean up
1188 * the list buffers.
1189 */
1190
1191 static void vr_txeof(sc)
1192 struct vr_softc *sc;
1193 {
1194 struct vr_chain *cur_tx;
1195 struct ifnet *ifp;
1196 register struct mbuf *n;
1197
1198 ifp = &sc->vr_ec.ec_if;
1199
1200 /* Clear the timeout timer. */
1201 ifp->if_timer = 0;
1202
1203 /* Sanity check. */
1204 if (sc->vr_cdata.vr_tx_head == NULL)
1205 return;
1206
1207 /*
1208 * Go through our tx list and free mbufs for those
1209 * frames that have been transmitted.
1210 */
1211 while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1212 u_int32_t txstat;
1213
1214 cur_tx = sc->vr_cdata.vr_tx_head;
1215 txstat = cur_tx->vr_ptr->vr_status;
1216
1217 if (txstat & VR_TXSTAT_OWN)
1218 break;
1219
1220 if (txstat & VR_TXSTAT_ERRSUM) {
1221 ifp->if_oerrors++;
1222 if (txstat & VR_TXSTAT_DEFER)
1223 ifp->if_collisions++;
1224 if (txstat & VR_TXSTAT_LATECOLL)
1225 ifp->if_collisions++;
1226 }
1227
1228 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;
1229
1230 ifp->if_opackets++;
1231 MFREE(cur_tx->vr_mbuf, n);
1232 cur_tx->vr_mbuf = NULL;
1233
1234 if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1235 sc->vr_cdata.vr_tx_head = NULL;
1236 sc->vr_cdata.vr_tx_tail = NULL;
1237 break;
1238 }
1239
1240 sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1241 }
1242
1243 return;
1244 }
1245
1246 /*
1247 * TX 'end of channel' interrupt handler.
1248 */
1249 static void vr_txeoc(sc)
1250 struct vr_softc *sc;
1251 {
1252 struct ifnet *ifp;
1253
1254 ifp = &sc->vr_ec.ec_if;
1255
1256 ifp->if_timer = 0;
1257
1258 if (sc->vr_cdata.vr_tx_head == NULL) {
1259 ifp->if_flags &= ~IFF_OACTIVE;
1260 sc->vr_cdata.vr_tx_tail = NULL;
1261 if (sc->vr_want_auto)
1262 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1263 }
1264
1265 return;
1266 }
1267
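/*
 * Interrupt handler.  With the IMR masked, read and acknowledge the
 * ISR until no interesting status bits remain, dispatching to the
 * RX/TX completion and error recovery routines above, then kick the
 * transmit queue on the way out.
 */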
1268 static void vr_intr(arg)
1269 void *arg;
1270 {
1271 struct vr_softc *sc;
1272 struct ifnet *ifp;
1273 u_int16_t status;
1274
1275 sc = arg;
1276 ifp = &sc->vr_ec.ec_if;
1277
1278 /* Suppress unwanted interrupts. */
1279 if (!(ifp->if_flags & IFF_UP)) {
1280 vr_stop(sc);
1281 return;
1282 }
1283
1284 /* Disable interrupts. */
1285 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1286
1287 for (;;) {
1288
1289 status = CSR_READ_2(sc, VR_ISR);
1290 if (status)
1291 CSR_WRITE_2(sc, VR_ISR, status);
1292
1293 if ((status & VR_INTRS) == 0)
1294 break;
1295
1296 if (status & VR_ISR_RX_OK)
1297 vr_rxeof(sc);
1298
1299 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1300 (status & VR_ISR_RX_OFLOW) || (status & VR_ISR_RX_DROPPED)) {
1302 vr_rxeof(sc);
1303 vr_rxeoc(sc);
1304 }
1305
1306 if (status & VR_ISR_TX_OK) {
1307 vr_txeof(sc);
1308 vr_txeoc(sc);
1309 }
1310
1311 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)) {
1312 ifp->if_oerrors++;
1313 vr_txeof(sc);
1314 if (sc->vr_cdata.vr_tx_head != NULL) {
1315 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1316 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1317 }
1318 }
1319
1320 if (status & VR_ISR_BUSERR) {
1321 vr_reset(sc);
1322 vr_init(sc);
1323 }
1324 }
1325
1326 /* Re-enable interrupts. */
1327 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1328
1329 if (ifp->if_snd.ifq_head != NULL) {
1330 vr_start(ifp);
1331 }
1332
1333 return;
1334 }
1335
1336 /*
1337 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1338 * pointers to the fragment pointers.
1339 */
1340 static int vr_encap(sc, c, m_head)
1341 struct vr_softc *sc;
1342 struct vr_chain *c;
1343 struct mbuf *m_head;
1344 {
1345 int frag = 0;
1346 struct vr_desc *f = NULL;
1347 int total_len;
1348 struct mbuf *m;
1349
1350 m = m_head;
1351 total_len = 0;
1352
1353 /*
1354 * The VIA Rhine wants packet buffers to be longword
1355 * aligned, but very often our mbufs aren't. Rather than
1356 * waste time trying to decide when to copy and when not
1357 * to copy, just do it all the time.
1358 */
1359 if (m != NULL) {
1360 struct mbuf *m_new = NULL;
1361
1362 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1363 if (m_new == NULL) {
1364 printf("%s: no memory for tx list",
1365 sc->vr_dev.dv_xname);
1366 return (1);
1367 }
1368 if (m_head->m_pkthdr.len > MHLEN) {
1369 MCLGET(m_new, M_DONTWAIT);
1370 if (!(m_new->m_flags & M_EXT)) {
1371 m_freem(m_new);
1372 printf("%s: no memory for tx list",
1373 sc->vr_dev.dv_xname);
1374 return (1);
1375 }
1376 }
1377 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1378 mtod(m_new, caddr_t));
1379 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1380 m_freem(m_head);
1381 m_head = m_new;
1382 /*
1383 * The Rhine chip doesn't auto-pad, so we have to make
1384 * sure to pad short frames out to the minimum frame length
1385 * ourselves.
1386 */
1387 if (m_head->m_len < VR_MIN_FRAMELEN) {
1388 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1389 m_new->m_len = m_new->m_pkthdr.len;
1390 }
1391 f = c->vr_ptr;
1392 f->vr_data = vtophys(mtod(m_new, caddr_t));
1393 f->vr_ctl = total_len = m_new->m_len;
1394 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1395 f->vr_status = 0;
1396 frag = 1;
1397 }
1398
1399 c->vr_mbuf = m_head;
1400 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1401 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1402
1403 return (0);
1404 }
1405
1406 /*
1407 * Main transmit routine.  Dequeue frames from the interface send queue
1408 * and hand each one to vr_encap(), which copies it into a longword
1409 * aligned buffer and links it into the TX descriptor ring; a pointer to
1410 * the mbuf is kept in the ring so vr_txeof() can free it later.
1411 */
1412
1413 static void vr_start(ifp)
1414 struct ifnet *ifp;
1415 {
1416 struct vr_softc *sc;
1417 struct mbuf *m_head = NULL;
1418 struct vr_chain *cur_tx = NULL, *start_tx;
1419
1420 sc = ifp->if_softc;
1421
1422 if (sc->vr_autoneg) {
1423 sc->vr_tx_pend = 1;
1424 return;
1425 }
1426
1427 /*
1428 * Check for an available queue slot. If there are none,
1429 * punt.
1430 */
1431 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1432 ifp->if_flags |= IFF_OACTIVE;
1433 return;
1434 }
1435
1436 start_tx = sc->vr_cdata.vr_tx_free;
1437
1438 while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1439 IF_DEQUEUE(&ifp->if_snd, m_head);
1440 if (m_head == NULL)
1441 break;
1442
1443 /* Pick a descriptor off the free list. */
1444 cur_tx = sc->vr_cdata.vr_tx_free;
1445 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1446
1447 /* Pack the data into the descriptor. */
1448 vr_encap(sc, cur_tx, m_head);
1449
1450 if (cur_tx != start_tx)
1451 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1452
1453 #if NBPFILTER > 0
1454 /*
1455 * If there's a BPF listener, bounce a copy of this frame
1456 * to him.
1457 */
1458 if (ifp->if_bpf)
1459 bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
1460 #endif
1461 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1462 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1463 }
1464
1465 /*
1466 * If there are no frames queued, bail.
1467 */
1468 if (cur_tx == NULL)
1469 return;
1470
1471 sc->vr_cdata.vr_tx_tail = cur_tx;
1472
1473 if (sc->vr_cdata.vr_tx_head == NULL)
1474 sc->vr_cdata.vr_tx_head = start_tx;
1475
1476 /*
1477 * Set a timeout in case the chip goes out to lunch.
1478 */
1479 ifp->if_timer = 5;
1480
1481 return;
1482 }
1483
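/*
 * (Re)initialize the interface: reset the chip, select store-and-forward
 * thresholds, rebuild the RX and TX rings, program the RX filter and
 * duplex setting, then enable the DMA engines and interrupts.
 */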
1484 static void vr_init(xsc)
1485 void *xsc;
1486 {
1487 struct vr_softc *sc = xsc;
1488 struct ifnet *ifp = &sc->vr_ec.ec_if;
1489 u_int16_t phy_bmcr = 0;
1490 int s;
1491
1492 if (sc->vr_autoneg)
1493 return;
1494
1495 s = splimp();
1496
1497 if (sc->vr_pinfo != NULL)
1498 phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);
1499
1500 /*
1501 * Cancel pending I/O and free all RX/TX buffers.
1502 */
1503 vr_stop(sc);
1504 vr_reset(sc);
1505
1506 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1507 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1508
1509 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1510 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1511
1512 /* Init circular RX list. */
1513 if (vr_list_rx_init(sc) == ENOBUFS) {
1514 printf("%s: initialization failed: no "
1515 "memory for rx buffers\n", sc->vr_dev.dv_xname);
1516 vr_stop(sc);
1517 (void)splx(s);
1518 return;
1519 }
1520
1521 /*
1522 * Init tx descriptors.
1523 */
1524 vr_list_tx_init(sc);
1525
1526 /* If we want promiscuous mode, set the allframes bit. */
1527 if (ifp->if_flags & IFF_PROMISC)
1528 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1529 else
1530 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1531
1532 /* Set capture broadcast bit to capture broadcast frames. */
1533 if (ifp->if_flags & IFF_BROADCAST)
1534 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1535 else
1536 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1537
1538 /*
1539 * Program the multicast filter, if necessary.
1540 */
1541 vr_setmulti(sc);
1542
1543 /*
1544 * Load the address of the RX list.
1545 */
1546 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1547
1548 /* Enable receiver and transmitter. */
1549 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1550 VR_CMD_TX_ON|VR_CMD_RX_ON|
1551 VR_CMD_RX_GO);
1552
1553 vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));
1554
1555 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1556
1557 /*
1558 * Enable interrupts.
1559 */
1560 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1561 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1562
1563 /* Restore state of BMCR */
1564 if (sc->vr_pinfo != NULL)
1565 vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1566
1567 ifp->if_flags |= IFF_RUNNING;
1568 ifp->if_flags &= ~IFF_OACTIVE;
1569
1570 (void)splx(s);
1571
1572 return;
1573 }
1574
1575 /*
1576 * Set media options.
1577 */
1578 static int vr_ifmedia_upd(ifp)
1579 struct ifnet *ifp;
1580 {
1581 struct vr_softc *sc;
1582 struct ifmedia *ifm;
1583
1584 sc = ifp->if_softc;
1585 ifm = &sc->ifmedia;
1586
1587 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1588 return (EINVAL);
1589
1590 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1591 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1592 else
1593 vr_setmode_mii(sc, ifm->ifm_media);
1594
1595 return (0);
1596 }
1597
1598 /*
1599 * Report current media status.
1600 */
1601 static void vr_ifmedia_sts(ifp, ifmr)
1602 struct ifnet *ifp;
1603 struct ifmediareq *ifmr;
1604 {
1605 struct vr_softc *sc;
1606 u_int16_t advert = 0, ability = 0;
1607
1608 sc = ifp->if_softc;
1609
1610 ifmr->ifm_active = IFM_ETHER;
1611
1612 if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1613 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1614 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1615 else
1616 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1617 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1618 ifmr->ifm_active |= IFM_FDX;
1619 else
1620 ifmr->ifm_active |= IFM_HDX;
1621 return;
1622 }
1623
1624 ability = vr_phy_readreg(sc, PHY_LPAR);
1625 advert = vr_phy_readreg(sc, PHY_ANAR);
1626 if (advert & PHY_ANAR_100BT4 &&
1627 ability & PHY_ANAR_100BT4) {
1628 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1629 } else if (advert & PHY_ANAR_100BTXFULL &&
1630 ability & PHY_ANAR_100BTXFULL) {
1631 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1632 } else if (advert & PHY_ANAR_100BTXHALF &&
1633 ability & PHY_ANAR_100BTXHALF) {
1634 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1635 } else if (advert & PHY_ANAR_10BTFULL &&
1636 ability & PHY_ANAR_10BTFULL) {
1637 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1638 } else if (advert & PHY_ANAR_10BTHALF &&
1639 ability & PHY_ANAR_10BTHALF) {
1640 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1641 }
1642
1643 return;
1644 }
1645
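/*
 * Handle interface ioctls: address assignment, MTU and flag changes,
 * multicast list updates and media selection.
 */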
1646 static int vr_ioctl(ifp, command, data)
1647 struct ifnet *ifp;
1648 u_long command;
1649 caddr_t data;
1650 {
1651 struct vr_softc *sc = ifp->if_softc;
1652 struct ifreq *ifr = (struct ifreq *)data;
1653 struct ifaddr *ifa = (struct ifaddr *)data;
1654 int s, error = 0;
1655
1656 s = splimp();
1657
1658 switch (command) {
1659 case SIOCSIFADDR:
1660 ifp->if_flags |= IFF_UP;
1661
1662 switch (ifa->ifa_addr->sa_family) {
1663 #ifdef INET
1664 case AF_INET:
1665 vr_init(sc);
1666 arp_ifinit(ifp, ifa);
1667 break;
1668 #endif /* INET */
1669 default:
1670 vr_init(sc);
1671 break;
1672 }
1673 break;
1674
1675 case SIOCGIFADDR:
1676 bcopy((caddr_t) sc->vr_enaddr,
1677 (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
1678 ETHER_ADDR_LEN);
1679 break;
1680
1681 case SIOCSIFMTU:
1682 if (ifr->ifr_mtu > ETHERMTU)
1683 error = EINVAL;
1684 else
1685 ifp->if_mtu = ifr->ifr_mtu;
1686 break;
1687
1688 case SIOCSIFFLAGS:
1689 if (ifp->if_flags & IFF_UP) {
1690 vr_init(sc);
1691 } else {
1692 if (ifp->if_flags & IFF_RUNNING)
1693 vr_stop(sc);
1694 }
1695 error = 0;
1696 break;
1697 case SIOCADDMULTI:
1698 case SIOCDELMULTI:
1699 if (command == SIOCADDMULTI)
1700 error = ether_addmulti(ifr, &sc->vr_ec);
1701 else
1702 error = ether_delmulti(ifr, &sc->vr_ec);
1703
1704 if (error == ENETRESET) {
1705 vr_setmulti(sc);
1706 error = 0;
1707 }
1708 break;
1709 case SIOCGIFMEDIA:
1710 case SIOCSIFMEDIA:
1711 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1712 break;
1713 default:
1714 error = EINVAL;
1715 break;
1716 }
1717
1718 (void)splx(s);
1719
1720 return (error);
1721 }
1722
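/*
 * Watchdog timeout: either a scheduled autonegotiation session has run
 * its course, or the transmitter has wedged, in which case reset and
 * reinitialize the chip.
 */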
1723 static void vr_watchdog(ifp)
1724 struct ifnet *ifp;
1725 {
1726 struct vr_softc *sc;
1727
1728 sc = ifp->if_softc;
1729
1730 if (sc->vr_autoneg) {
1731 vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1732 return;
1733 }
1734
1735 ifp->if_oerrors++;
1736 printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
1737
1738 if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1739 printf("%s: no carrier - transceiver cable problem?\n",
1740 sc->vr_dev.dv_xname);
1741
1742 vr_stop(sc);
1743 vr_reset(sc);
1744 vr_init(sc);
1745
1746 if (ifp->if_snd.ifq_head != NULL)
1747 vr_start(ifp);
1748
1749 return;
1750 }
1751
1752 /*
1753 * Stop the adapter and free any mbufs allocated to the
1754 * RX and TX lists.
1755 */
1756 static void vr_stop(sc)
1757 struct vr_softc *sc;
1758 {
1759 register int i;
1760 struct ifnet *ifp;
1761
1762 ifp = &sc->vr_ec.ec_if;
1763 ifp->if_timer = 0;
1764
1765 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1766 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1767 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1768 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1769 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1770
1771 /*
1772 * Free data in the RX lists.
1773 */
1774 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1775 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1776 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1777 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1778 }
1779 }
1780 bzero((char *)&sc->vr_ldata->vr_rx_list,
1781 sizeof (sc->vr_ldata->vr_rx_list));
1782
1783 /*
1784 * Free the TX list buffers.
1785 */
1786 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1787 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1788 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1789 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1790 }
1791 }
1792
1793 bzero((char *)&sc->vr_ldata->vr_tx_list,
1794 sizeof (sc->vr_ldata->vr_tx_list));
1795
1796 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1797
1798 return;
1799 }
1800
1801 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
1802 static int vr_probe __P((struct device *, struct cfdata *, void *));
1803 static void vr_attach __P((struct device *, struct device *, void *));
1804 static void vr_shutdown __P((void *));
1805
1806 struct cfattach vr_ca = {
1807 sizeof (struct vr_softc), vr_probe, vr_attach
1808 };
1809
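/*
 * Match a PCI device against the vr_devs table above; used by both
 * vr_probe() and vr_attach().
 */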
1810 static struct vr_type *
1811 vr_lookup(pa)
1812 struct pci_attach_args *pa;
1813 {
1814 struct vr_type *vrt;
1815
1816 for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1817 if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1818 PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1819 return (vrt);
1820 }
1821 return (NULL);
1822 }
1823
1824 static int
1825 vr_probe(parent, match, aux)
1826 struct device *parent;
1827 struct cfdata *match;
1828 void *aux;
1829 {
1830 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1831
1832 if (vr_lookup(pa) != NULL)
1833 return (1);
1834
1835 return (0);
1836 }
1837
1838 /*
1839 * Stop all chip I/O so that the kernel's probe routines don't
1840 * get confused by errant DMAs when rebooting.
1841 */
1842 static void vr_shutdown(arg)
1843 void *arg;
1844 {
1845 struct vr_softc *sc = (struct vr_softc *)arg;
1846
1847 vr_stop(sc);
1848
1849 return;
1850 }
1851
1852 /*
1853 * Attach the interface. Allocate softc structures, do ifmedia
1854 * setup and ethernet/BPF attach.
1855 */
1856 static void
1857 vr_attach(parent, self, aux)
1858 struct device * const parent;
1859 struct device * const self;
1860 void * const aux;
1861 {
1862 #define PCI_CONF_WRITE(r, v) pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1863 #define PCI_CONF_READ(r) pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1864 struct vr_softc * const sc = (struct vr_softc *) self;
1865 struct pci_attach_args * const pa = (struct pci_attach_args *) aux;
1866 struct vr_type *vrt;
1867 int i;
1868 u_int32_t command;
1869 struct ifnet *ifp;
1870 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1871 unsigned int round;
1872 caddr_t roundptr;
1873 u_char eaddr[ETHER_ADDR_LEN];
1874 struct vr_type *p;
1875 u_int16_t phy_vid, phy_did, phy_sts;
1876
1877 vrt = vr_lookup(pa);
1878 if (vrt == NULL) {
1879 printf("\n");
1880 panic("vr_attach: impossible");
1881 }
1882
1883 printf(": %s Ethernet\n", vrt->vr_name);
1884
1885 /*
1886 * Handle power management nonsense.
1887 */
1888
1889 command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1890 if (command == 0x01) {
1891
1892 command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1893 if (command & VR_PSTATE_MASK) {
1894 u_int32_t iobase, membase, irq;
1895
1896 /* Save important PCI config data. */
1897 iobase = PCI_CONF_READ(VR_PCI_LOIO);
1898 membase = PCI_CONF_READ(VR_PCI_LOMEM);
1899 irq = PCI_CONF_READ(VR_PCI_INTLINE);
1900
1901 /* Reset the power state. */
1902 printf("%s: chip is in D%d power mode "
1903 "-- setting to D0\n",
1904 sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1905 command &= 0xFFFFFFFC;
1906 PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1907
1908 /* Restore PCI config data. */
1909 PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1910 PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1911 PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1912 }
1913 }
1914
1915 /*
1916 * Map control/status registers.
1917 */
1918 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1919 command |= (PCI_COMMAND_IO_ENABLE |
1920 PCI_COMMAND_MEM_ENABLE |
1921 PCI_COMMAND_MASTER_ENABLE);
1922 PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1923 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1924
1925 {
1926 bus_space_tag_t iot, memt;
1927 bus_space_handle_t ioh, memh;
1928 int ioh_valid, memh_valid;
1929 pci_intr_handle_t intrhandle;
1930 const char *intrstr;
1931
1932 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1933 PCI_MAPREG_TYPE_IO, 0,
1934 &iot, &ioh, NULL, NULL) == 0);
1935 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1936 PCI_MAPREG_TYPE_MEM |
1937 PCI_MAPREG_MEM_TYPE_32BIT,
1938 0, &memt, &memh, NULL, NULL) == 0);
1939 #if defined(VR_USEIOSPACE)
1940 if (ioh_valid) {
1941 sc->vr_btag = iot;
1942 sc->vr_bhandle = ioh;
1943 } else if (memh_valid) {
1944 sc->vr_btag = memt;
1945 sc->vr_bhandle = memh;
1946 }
1947 #else
1948 if (memh_valid) {
1949 sc->vr_btag = memt;
1950 sc->vr_bhandle = memh;
1951 } else if (ioh_valid) {
1952 sc->vr_btag = iot;
1953 sc->vr_bhandle = ioh;
1954 }
1955 #endif
1956 else {
1957 printf(": unable to map device registers\n");
1958 return;
1959 }
1960
1961 /* Allocate interrupt */
1962 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1963 pa->pa_intrline, &intrhandle)) {
1964 printf("%s: couldn't map interrupt\n",
1965 sc->vr_dev.dv_xname);
1966 goto fail;
1967 }
1968 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1969 sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1970 (void *)vr_intr, sc);
1971 if (sc->vr_ih == NULL) {
1972 printf("%s: couldn't establish interrupt",
1973 sc->vr_dev.dv_xname);
1974 if (intrstr != NULL)
1975 printf(" at %s", intrstr);
1976 printf("\n");
1977 }
1978 printf("%s: interrupting at %s\n",
1979 sc->vr_dev.dv_xname, intrstr);
1980 }
1981 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1982 if (sc->vr_ats == NULL)
1983 printf("%s: warning: couldn't establish shutdown hook\n",
1984 sc->vr_dev.dv_xname);
1985
1986 /* Reset the adapter. */
1987 vr_reset(sc);
1988
1989 /*
1990 * Get station address. The way the Rhine chips work,
1991 * you're not allowed to directly access the EEPROM once
1992 * they've been programmed a special way. Consequently,
1993 * we need to read the node address from the PAR0 and PAR1
1994 * registers.
1995 */
1996 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1997 DELAY(200);
1998 for (i = 0; i < ETHER_ADDR_LEN; i++)
1999 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
2000
2001 /*
2002 * A Rhine chip was detected. Inform the world.
2003 */
2004 printf("%s: Ethernet address: %s\n",
2005 sc->vr_dev.dv_xname, ether_sprintf(eaddr));
2006
2007 bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
2008
2009 sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
2010 M_DEVBUF, M_NOWAIT);
2011 if (sc->vr_ldata_ptr == NULL) {
2013 printf("%s: no memory for list buffers!\n",
2014 sc->vr_dev.dv_xname);
2015 return;
2016 }
2017
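	/*
	 * The list buffer was allocated with 8 bytes of slack above;
	 * advance the pointer here until the descriptor list sits on
	 * an 8-byte boundary.
	 */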
2018 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
2019 round = (unsigned long)sc->vr_ldata_ptr & 0xF;
2020 roundptr = sc->vr_ldata_ptr;
2021 for (i = 0; i < 8; i++) {
2022 if (round % 8) {
2023 round++;
2024 roundptr++;
2025 } else
2026 break;
2027 }
2028 sc->vr_ldata = (struct vr_list_data *)roundptr;
2029 bzero(sc->vr_ldata, sizeof (struct vr_list_data));
2030
2031 ifp = &sc->vr_ec.ec_if;
2032 ifp->if_softc = sc;
2033 ifp->if_mtu = ETHERMTU;
2034 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2035 ifp->if_ioctl = vr_ioctl;
2036 ifp->if_output = ether_output;
2037 ifp->if_start = vr_start;
2038 ifp->if_watchdog = vr_watchdog;
2039 ifp->if_baudrate = 10000000;
2040 bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2041
2042 for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
2043 sc->vr_phy_addr = i;
2044 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
2045 DELAY(500);
2046 while (vr_phy_readreg(sc, PHY_BMCR)
2047 & PHY_BMCR_RESET);
2048 if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
2049 break;
2050 }
2051 if (phy_sts) {
2052 phy_vid = vr_phy_readreg(sc, PHY_VENID);
2053 phy_did = vr_phy_readreg(sc, PHY_DEVID);
2054 p = vr_phys;
2055 while (p->vr_vid) {
2056 if (phy_vid == p->vr_vid &&
2057 (phy_did | 0x000F) == p->vr_did) {
2058 sc->vr_pinfo = p;
2059 break;
2060 }
2061 p++;
2062 }
2063 if (sc->vr_pinfo == NULL)
2064 sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
2065 } else {
2066 printf("%s: MII without any phy!\n",
2067 sc->vr_dev.dv_xname);
2068 goto fail;
2069 }
2070
2071 /*
2072 * Do ifmedia setup.
2073 */
2074 ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
2075
2076 vr_getmode_mii(sc);
2077 vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
2078 media = sc->ifmedia.ifm_media;
2079 vr_stop(sc);
2080
2081 ifmedia_set(&sc->ifmedia, media);
2082
2083 /*
2084 * Call MI attach routines.
2085 */
2086 if_attach(ifp);
2087 ether_ifattach(ifp, sc->vr_enaddr);
2088
2089 #if NBPFILTER > 0
2090 bpfattach(&sc->vr_ec.ec_if.if_bpf,
2091 ifp, DLT_EN10MB, sizeof (struct ether_header));
2092 #endif
2093
2098
2099 fail:
2100 return;
2101 }
2102