/*	$NetBSD: if_vge.c,v 1.4.4.2 2005/03/19 08:35:11 yamt Exp $	*/

/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.4.4.2 2005/03/19 08:35:11 yamt Exp $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul (at) windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
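
/*
 * Sketch of the 48-bit addressing constraint described above. This is
 * illustration only and is never compiled; the VGE_ADDR_LO/VGE_ADDR_HI
 * macros come from if_vgereg.h, and 'segs' is a hypothetical DMA
 * segment array.
 */
#if 0
	bus_addr_t pa = segs[0].ds_addr;
	uint32_t lo = VGE_ADDR_LO(pa);		/* bits 0..31 -> descriptor */
	uint16_t hi = VGE_ADDR_HI(pa) & 0xFFFF;	/* bits 32..47 -> descriptor */
	/*
	 * Bits 48..63 can only be programmed via an I/O register shared
	 * by all descriptors, so every buffer must live in the same
	 * 48-bit segment.
	 */
#endif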

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>
#include <dev/pci/if_vgevar.h>

static int vge_probe(struct device *, struct cfdata *, void *);
static void vge_attach(struct device *, struct device *, void *);

static int vge_encap(struct vge_softc *, struct mbuf *, int);

static int vge_dma_map_rx_desc(struct vge_softc *, int);
static void vge_dma_map_tx_desc(struct vge_softc *, struct mbuf *, int, int);
static int vge_allocmem(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init(struct vge_softc *);
static int vge_tx_list_init(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx(struct mbuf *);
#endif
static void vge_rxeof(struct vge_softc *);
static void vge_txeof(struct vge_softc *);
static int vge_intr(void *);
static void vge_tick(void *);
static void vge_start(struct ifnet *);
static int vge_ioctl(struct ifnet *, u_long, caddr_t);
static int vge_init(struct ifnet *);
static void vge_stop(struct vge_softc *);
static void vge_watchdog(struct ifnet *);
#if VGE_POWER_MANAGEMENT
static int vge_suspend(struct device *);
static int vge_resume(struct device *);
#endif
static void vge_shutdown(void *);
static int vge_ifmedia_upd(struct ifnet *);
static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void vge_eeprom_getword(struct vge_softc *, int, u_int16_t *);
static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);

static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_miibus_readreg(struct device *, int, int);
static void vge_miibus_writereg(struct device *, int, int, int);
static void vge_miibus_statchg(struct device *);

static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_setmulti(struct vge_softc *);
static void vge_reset(struct vge_softc *);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

CFATTACH_DECL(vge, sizeof(struct vge_softc),
    vge_probe, vge_attach, NULL, NULL);
/*
 * Defragment mbuf chain contents to be as linear as possible.
 * Returns new mbuf chain on success, NULL on failure. Note that,
 * despite its name, this routine does not free the old mbuf chain;
 * the caller remains responsible for it.
 * XXX temporary until there is a generic function doing this.
 */
#define m_defrag	vge_m_defrag
struct mbuf *vge_m_defrag(struct mbuf *, int);

struct mbuf *
vge_m_defrag(struct mbuf *mold, int flags)
{
	struct mbuf *m0, *mn, *n;
	size_t sz = mold->m_pkthdr.len;

#ifdef DIAGNOSTIC
	if ((mold->m_flags & M_PKTHDR) == 0)
		panic("m_defrag: not an mbuf chain header");
#endif

	MGETHDR(m0, flags, MT_DATA);
	if (m0 == NULL)
		return NULL;
	m0->m_pkthdr.len = mold->m_pkthdr.len;
	mn = m0;

	do {
		if (sz > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(m0);
				return NULL;
			}
		}

		mn->m_len = MIN(sz, MCLBYTES);

		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
		    mtod(mn, caddr_t));

		sz -= mn->m_len;

		if (sz > 0) {
			/* need more mbufs */
			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL) {
				m_freem(m0);
				return NULL;
			}

			mn->m_next = n;
			mn = n;
		}
	} while (sz > 0);

	return m0;
}
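
/*
 * Usage sketch (illustration only, never compiled): vge_encap() below
 * falls back on this routine when an outgoing chain has more fragments
 * than a TX descriptor can address, then retries the DMA load on the
 * linearized copy.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->vge_dmat, map, m_head,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return (error);
		/* ... retry bus_dmamap_load_mbuf() with m_new ... */
	}
#endif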

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}
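
/*
 * Usage sketch (illustration only, never compiled): the attach routine
 * below fetches the 6-byte station address as three consecutive 16-bit
 * EEPROM words starting at VGE_EE_EADDR, unswapped:
 */
#if 0
	u_char eaddr[ETHER_ADDR_LEN];
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
#endif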

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    sc->sc_dev.dv_xname);
	}
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to start MII autopoll\n",
		    sc->sc_dev.dv_xname);
	}
}

static int
vge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i;
	u_int16_t rval = 0;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static void
vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
	}

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp;
	int error = 0;
	u_int32_t h, hashes[2] = { 0, 0 };
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp = &sc->sc_ethercom.ec_if;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
allmulti:
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		/*
		 * If multicast range, fall back to ALLMULTI.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		error = vge_cam_set(sc, enm->enm_addrlo);
		if (error)
			break;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}
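
/*
 * Sketch of the hash mapping used above (illustration only, never
 * compiled): the top 6 bits of the big-endian CRC32 of the address
 * select one of the 64 filter bits spread across MAR0 and MAR1.
 */
#if 0
	h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26; /* 0..63 */
	hashes[h >> 5] |= 1 << (h & 31);	/* h < 32 -> MAR0, else MAR1 */
#endif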

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: soft reset timed out\n", sc->sc_dev.dv_xname);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a match if we find one.
 */
static int
vge_probe(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
		return 1;

	return (0);
}

static int
vge_dma_map_rx_desc(struct vge_softc *sc, int idx)
{
	struct vge_rx_desc *d = NULL;
	bus_dma_segment_t *segs;

	/*
	 * Map the segment array into descriptors.
	 */

	d = &sc->vge_ldata.vge_rx_list[idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		printf("%s: tried to map busy descriptor\n",
		    sc->sc_dev.dv_xname);
		return (EBUSY);
	}

	segs = sc->vge_ldata.vge_rx_dmamap[idx]->dm_segs;

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	return (0);
}

static void
vge_dma_map_tx_desc(struct vge_softc *sc, struct mbuf *m0, int idx, int flags)
{
	struct vge_tx_desc *d = &sc->vge_ldata.vge_tx_list[idx];
	struct vge_tx_frag *f;
	int i = 0;
	bus_dma_segment_t *segs;
	size_t sz;
	bus_dmamap_t map = sc->vge_ldata.vge_tx_dmamap[idx];

	/* Map the segment array into descriptors. */

	segs = map->dm_segs;
	for (i = 0; i < map->dm_nsegs; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames. */

	sz = m0->m_pkthdr.len;
	if (m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		sz = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = sz << 16;
	d->vge_ctl = flags|(i << 28)|VGE_TD_LS_NORM;

	if (sz > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;
}
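
/*
 * Worked example of the padding quirk above (a sketch, not compiled
 * code): a runt carried in one DMA segment and shorter than
 * VGE_MIN_FRAMELEN gets a second, pad fragment that simply re-reads the
 * start of the first segment (the chip only needs some readable bytes
 * to transmit as padding), and the descriptor's segment count field is
 * then written as nsegs + pad fragment + 1 == 3.
 */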

static int
vge_allocmem(struct vge_softc *sc)
{
	int error;
	int nseg;
	int i;
	bus_dma_segment_t seg;

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dmamap_create(sc->vge_dmat,
	    round_page(VGE_TX_LIST_SZ), 1, round_page(VGE_TX_LIST_SZ),
	    0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		printf("%s: could not allocate TX dma list map\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate memory for TX descriptor list.
	 */

	error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
	    0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate TX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, seg.ds_len,
	    (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not map TX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
	    sc->vge_ldata.vge_tx_list, seg.ds_len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load TX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	sc->vge_ldata.vge_tx_list_addr =
	    sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr;

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dmamap_create(sc->vge_dmat,
	    round_page(VGE_RX_LIST_SZ), 1, round_page(VGE_RX_LIST_SZ),
	    0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		printf("%s: could not allocate RX dma list map\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
	    0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, seg.ds_len,
	    (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
	    sc->vge_ldata.vge_rx_list, seg.ds_len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load RX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	sc->vge_ldata.vge_rx_list_addr =
	    sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr;

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vge_attach(struct device *parent, struct device *self, void *aux)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc = (struct vge_softc *)self;
	struct ifnet *ifp;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr;
	pci_intr_handle_t ih;

	aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
	    PCI_REVISION(pa->pa_class));

	/* Make sure bus-mastering is enabled */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM,
	    PCI_MAPREG_TYPE_MEM, BUS_SPACE_MAP_LINEAR,
	    &sc->vge_btag, &sc->vge_bhandle, NULL, NULL) != 0) {
		aprint_error("%s: couldn't map memory\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
	if (sc->vge_intrhand == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
	bcopy(eaddr, (char *)&sc->vge_eaddr, ETHER_ADDR_LEN);

	printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(eaddr));

	/*
	 * Use the 32-bit tag. The hardware supports 48-bit physical
	 * addresses, but we don't use that for now.
	 */
	sc->vge_dmat = pa->pa_dmat;

	if (vge_allocmem(sc))
		return;

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
	    vge_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	callout_init(&sc->vge_timeout);
	callout_setfunc(&sc->vge_timeout, vge_tick, sc);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	}
}

static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_gethdr(M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return (ENOBUFS);

		m_clget(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			return (ENOBUFS);
		}

		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	error = bus_dmamap_load_mbuf(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, BUS_DMA_NOWAIT);
	if (error || vge_dma_map_rx_desc(sc, idx)) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
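
/*
 * Sketch of what vge_fixup_rx() accomplishes (assuming ETHER_ALIGN and
 * VGE_ETHER_ALIGN are both 2): the chip forces RX buffers to a 4-byte
 * boundary, which leaves the IP header following the 14-byte Ethernet
 * header misaligned; shifting the whole frame back two bytes realigns
 * it for hosts that trap on unaligned loads.
 *
 *	as received:	4n + 0:  Ethernet header (14 bytes)
 *			4n + 14: IP header	<- misaligned
 *	after fixup:	4n - 2:  Ethernet header
 *			4n + 12: IP header	<- 4-byte aligned
 */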

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	int lim = 0;
	struct vge_rx_desc *cur_rx;
	u_int32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = &sc->sc_ethercom.ec_if;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->vge_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			/* Check TCP checksum */
			if (rxctl & VGE_RDCTL_TCPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
			/* Check UDP checksum */
			if (rxctl & VGE_RDCTL_UDPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxstat & VGE_RDSTS_VTAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxctl & VGE_RDCTL_VLANID)), continue);

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->sc_ethercom.ec_if;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_dmat,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	}
}

static void
vge_tick(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();

	VGE_LOCK(sc);

	callout_schedule(&sc->vge_timeout, hz);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				vge_start(ifp);
		}
	}

	VGE_UNLOCK(sc);

	splx(s);
}

#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	VGE_LOCK(sc);
#ifdef IFCAP_POLLING
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
#endif
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
		goto done;
	}

	sc->rxcycles = count;
	vge_rxeof(sc);
	vge_txeof(sc);

#if __FreeBSD_version < 502114
	if (ifp->if_snd.ifq_head != NULL)
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t status;
		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL)
			vge_init(ifp);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static int
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status;
	int claim = 0;

	if (sc->suspended) {
		return claim;
	}

	VGE_LOCK(sc);

	if (!(ifp->if_flags & IFF_UP)) {
		VGE_UNLOCK(sc);
		return claim;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (
#ifdef IFCAP_POLLING
	    (ifp->if_capenable & IFCAP_POLLING) &&
#endif
	    ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
		vge_poll(ifp, 0, 1);
		goto done;
	}

#endif /* DEVICE_POLLING */

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xFFFFFFFF. */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			claim = 1;
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(ifp);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

#ifdef DEVICE_POLLING
done:
#endif
	VGE_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vge_start(ifp);

	return claim;
}

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;
	int error, flags;
	struct m_tag *mtag;

	/* If this descriptor is still owned by the chip, bail. */
	if (sc->vge_ldata.vge_tx_free <= 2
	    || le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts) & VGE_TDSTS_OWN)
		return (ENOBUFS);

	flags = 0;

	if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
		flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
		flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
		flags |= VGE_TDCTL_UDPCSUM;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	/* If too many segments to map, coalesce */
	if (error == EFBIG) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return (error);

		error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
		    m_new, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_new);
			return (error);
		}

		m_head = m_new;
	} else if (error)
		return (error);

	vge_dma_map_tx_desc(sc, m_head, idx, flags);

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
	if (mtag != NULL)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	return (0);
}

/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0, error;

	sc = ifp->if_softc;
	VGE_LOCK(sc);

	if (!sc->vge_link
	    || (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		VGE_UNLOCK(sc);
		return;
	}

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
			/*
			 * Slot already used, stop for now.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if ((error = vge_encap(sc, m_head, idx))) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m_head);
				m_freem(m_head);
				continue;
			}

			/*
			 * Short on resources, just stop for now.
			 */
			if (error == ENOBUFS)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
			m_freem(m_head);
			m_head = sc->vge_ldata.vge_tx_mbuf[idx];
		}

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		VGE_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	VGE_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	int i;

	VGE_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the CAM filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	VGE_UNLOCK(sc);

	callout_schedule(&sc->vge_timeout, hz);

	return (0);
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
vge_miibus_statchg(struct device *self)
{
	struct vge_softc *sc = (struct vge_softc *)self;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %x\n",
		    sc->sc_dev.dv_xname,
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->vge_if_flags & IFF_PROMISC) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else
				vge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	VGE_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	int i;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	VGE_LOCK(sc);
	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_dmat,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_dmat,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}

	VGE_UNLOCK(sc);
}

#if VGE_POWER_MANAGEMENT
/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(struct device *dev)
{
	struct vge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	vge_stop(sc);

	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(struct device *dev)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(ifp);

	sc->suspended = 0;

	return (0);
}
#endif

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(void *arg)
{
	struct vge_softc *sc = (struct vge_softc *)arg;

	vge_stop(sc);
}