1 /* $NetBSD: if_vge.c,v 1.16 2006/10/15 10:33:25 tsutsui Exp $ */
2
3 /*-
4 * Copyright (c) 2004
5 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.16 2006/10/15 10:33:25 tsutsui Exp $");
39
40 /*
41 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
42 *
43 * Written by Bill Paul <wpaul (at) windriver.com>
44 * Senior Networking Software Engineer
45 * Wind River Systems
46 */
47
48 /*
49 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
50 * combines a tri-speed ethernet MAC and PHY, with the following
51 * features:
52 *
53 * o Jumbo frame support up to 16K
54 * o Transmit and receive flow control
55 * o IPv4 checksum offload
56 * o VLAN tag insertion and stripping
57 * o TCP large send
58 * o 64-bit multicast hash table filter
59 * o 64 entry CAM filter
60 * o 16K RX FIFO and 48K TX FIFO memory
61 * o Interrupt moderation
62 *
63 * The VT6122 supports up to four transmit DMA queues. The descriptors
64 * in the transmit ring can address up to 7 data fragments; frames which
65 * span more than 7 data buffers must be coalesced, but in general the
66 * BSD TCP/IP stack rarely generates frames longer than 2 or 3
67 * fragments. The receive descriptors address only a single buffer.
68 *
69 * There are two peculiar design issues with the VT6122. One is that
70 * receive data buffers must be aligned on a 32-bit boundary. This is
71 * not a problem where the VT6122 is used as a LOM device in x86-based
72 * systems, but on architectures that generate unaligned access traps, we
73 * have to do some copying.
74 *
75 * The other issue has to do with the way 64-bit addresses are handled.
76 * The DMA descriptors only allow you to specify 48 bits of addressing
77 * information. The remaining 16 bits are specified using one of the
78 * I/O registers. If you only have a 32-bit system, then this isn't
79 * an issue, but if you have a 64-bit system and more than 4GB of
80 * memory, you must make sure your network data buffers reside
81 * in the same 48-bit 'segment.'
82 *
83 * Special thanks to Ryan Fu at VIA Networking for providing documentation
84 * and sample NICs for testing.
85 */
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/if_ether.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <machine/bus.h>
107
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_vgereg.h>
116 #include <dev/pci/if_vgevar.h>
117
118 static int vge_probe(struct device *, struct cfdata *, void *);
119 static void vge_attach(struct device *, struct device *, void *);
120
121 static int vge_encap(struct vge_softc *, struct mbuf *, int);
122
123 static int vge_allocmem(struct vge_softc *);
124 static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
125 static int vge_rx_list_init(struct vge_softc *);
126 static int vge_tx_list_init(struct vge_softc *);
127 #ifndef __NO_STRICT_ALIGNMENT
128 static inline void vge_fixup_rx(struct mbuf *);
129 #endif
130 static void vge_rxeof(struct vge_softc *);
131 static void vge_txeof(struct vge_softc *);
132 static int vge_intr(void *);
133 static void vge_tick(void *);
134 static void vge_start(struct ifnet *);
135 static int vge_ioctl(struct ifnet *, u_long, caddr_t);
136 static int vge_init(struct ifnet *);
137 static void vge_stop(struct vge_softc *);
138 static void vge_watchdog(struct ifnet *);
139 #if VGE_POWER_MANAGEMENT
140 static int vge_suspend(struct device *);
141 static int vge_resume(struct device *);
142 #endif
143 static void vge_shutdown(void *);
144 static int vge_ifmedia_upd(struct ifnet *);
145 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
146
147 static uint16_t vge_read_eeprom(struct vge_softc *, int);
148
149 static void vge_miipoll_start(struct vge_softc *);
150 static void vge_miipoll_stop(struct vge_softc *);
151 static int vge_miibus_readreg(struct device *, int, int);
152 static void vge_miibus_writereg(struct device *, int, int, int);
153 static void vge_miibus_statchg(struct device *);
154
155 static void vge_cam_clear(struct vge_softc *);
156 static int vge_cam_set(struct vge_softc *, uint8_t *);
157 static void vge_setmulti(struct vge_softc *);
158 static void vge_reset(struct vge_softc *);
159
160 #define VGE_PCI_LOIO 0x10
161 #define VGE_PCI_LOMEM 0x14
162
163 CFATTACH_DECL(vge, sizeof(struct vge_softc),
164 vge_probe, vge_attach, NULL, NULL);
165
166 /*
167 * Defragment mbuf chain contents to be as linear as possible.
168 * Returns new mbuf chain on success, NULL on failure. Old mbuf
169 * chain is always freed.
170 * XXX temporary until there is a generic function doing this.
171 */
172 #define m_defrag vge_m_defrag
173 struct mbuf * vge_m_defrag(struct mbuf *, int);
174
175 struct mbuf *
176 vge_m_defrag(struct mbuf *mold, int flags)
177 {
178 struct mbuf *m0, *mn, *n;
179 size_t sz = mold->m_pkthdr.len;
180
181 #ifdef DIAGNOSTIC
182 if ((mold->m_flags & M_PKTHDR) == 0)
183 panic("m_defrag: not a mbuf chain header");
184 #endif
185
186 MGETHDR(m0, flags, MT_DATA);
187 if (m0 == NULL)
188 return NULL;
189 m0->m_pkthdr.len = mold->m_pkthdr.len;
190 mn = m0;
191
192 do {
193 if (sz > MHLEN) {
194 MCLGET(mn, M_DONTWAIT);
195 if ((mn->m_flags & M_EXT) == 0) {
196 m_freem(m0);
197 return NULL;
198 }
199 }
200
201 mn->m_len = MIN(sz, MCLBYTES);
202
203 m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
204 mtod(mn, caddr_t));
205
206 sz -= mn->m_len;
207
208 if (sz > 0) {
209 /* need more mbufs */
210 MGET(n, M_NOWAIT, MT_DATA);
211 if (n == NULL) {
212 m_freem(m0);
213 return NULL;
214 }
215
216 mn->m_next = n;
217 mn = n;
218 }
219 } while (sz > 0);
220
221 return m0;
222 }
223
224 /*
225 * Read a word of data stored in the EEPROM at address 'addr.'
226 */
227 static uint16_t
228 vge_read_eeprom(struct vge_softc *sc, int addr)
229 {
230 int i;
231 uint16_t word = 0;
232
233 /*
234 * Enter EEPROM embedded programming mode. In order to
235 * access the EEPROM at all, we first have to set the
236 * EELOAD bit in the CHIPCFG2 register.
237 */
238 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
239 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
240
241 /* Select the address of the word we want to read */
242 CSR_WRITE_1(sc, VGE_EEADDR, addr);
243
244 /* Issue read command */
245 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
246
247 /* Wait for the done bit to be set. */
248 for (i = 0; i < VGE_TIMEOUT; i++) {
249 DELAY(5);
250 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
251 break;
252 }
253
254 if (i == VGE_TIMEOUT) {
255 printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
256 return 0;
257 }
258
259 /* Read the result */
260 word = CSR_READ_2(sc, VGE_EERDDAT);
261
262 /* Turn off EEPROM access mode. */
263 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
264 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
265
266 return word;
267 }
268
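/*
 * Stop the MII autopoll state machine so that the PHY registers
 * can be accessed directly, and wait for it to report idle.
 */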
269 static void
270 vge_miipoll_stop(struct vge_softc *sc)
271 {
272 int i;
273
274 CSR_WRITE_1(sc, VGE_MIICMD, 0);
275
276 for (i = 0; i < VGE_TIMEOUT; i++) {
277 DELAY(1);
278 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
279 break;
280 }
281
282 if (i == VGE_TIMEOUT) {
283 printf("%s: failed to idle MII autopoll\n",
284 sc->sc_dev.dv_xname);
285 }
286 }
287
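/*
 * Restart MII autopolling: idle the poll engine first, then
 * re-enable automatic polling and verify that it has started.
 */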
288 static void
289 vge_miipoll_start(struct vge_softc *sc)
290 {
291 int i;
292
293 /* First, make sure we're idle. */
294
295 CSR_WRITE_1(sc, VGE_MIICMD, 0);
296 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
297
298 for (i = 0; i < VGE_TIMEOUT; i++) {
299 DELAY(1);
300 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
301 break;
302 }
303
304 if (i == VGE_TIMEOUT) {
305 printf("%s: failed to idle MII autopoll\n",
306 sc->sc_dev.dv_xname);
307 return;
308 }
309
310 /* Now enable auto poll mode. */
311
312 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
313
314 /* And make sure it started. */
315
316 for (i = 0; i < VGE_TIMEOUT; i++) {
317 DELAY(1);
318 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
319 break;
320 }
321
322 if (i == VGE_TIMEOUT) {
323 printf("%s: failed to start MII autopoll\n",
324 sc->sc_dev.dv_xname);
325 }
326 }
327
328 static int
329 vge_miibus_readreg(struct device *dev, int phy, int reg)
330 {
331 struct vge_softc *sc;
332 int i;
333 uint16_t rval;
334
335 sc = (void *)dev;
336 rval = 0;
337 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
338 return 0;
339
340 VGE_LOCK(sc);
341 vge_miipoll_stop(sc);
342
343 /* Specify the register we want to read. */
344 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
345
346 /* Issue read command. */
347 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
348
349 /* Wait for the read command bit to self-clear. */
350 for (i = 0; i < VGE_TIMEOUT; i++) {
351 DELAY(1);
352 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
353 break;
354 }
355
356 if (i == VGE_TIMEOUT)
357 printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
358 else
359 rval = CSR_READ_2(sc, VGE_MIIDATA);
360
361 vge_miipoll_start(sc);
362 VGE_UNLOCK(sc);
363
364 return rval;
365 }
366
367 static void
368 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
369 {
370 struct vge_softc *sc;
371 int i;
372
373 sc = (void *)dev;
374 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
375 return;
376
377 VGE_LOCK(sc);
378 vge_miipoll_stop(sc);
379
380 /* Specify the register we want to write. */
381 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
382
383 /* Specify the data we want to write. */
384 CSR_WRITE_2(sc, VGE_MIIDATA, data);
385
386 /* Issue write command. */
387 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
388
389 /* Wait for the write command bit to self-clear. */
390 for (i = 0; i < VGE_TIMEOUT; i++) {
391 DELAY(1);
392 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
393 break;
394 }
395
396 if (i == VGE_TIMEOUT) {
397 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
398 }
399
400 vge_miipoll_start(sc);
401 VGE_UNLOCK(sc);
402 }
403
404 static void
405 vge_cam_clear(struct vge_softc *sc)
406 {
407 int i;
408
409 /*
410 * Turn off all the mask bits. This tells the chip
411 * that none of the entries in the CAM filter are valid.
412 * Desired entries will be enabled as we fill the filter in.
413 */
414
415 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
416 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
417 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
418 for (i = 0; i < 8; i++)
419 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
420
421 /* Clear the VLAN filter too. */
422
423 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
424 for (i = 0; i < 8; i++)
425 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
426
427 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
428 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
429 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
430
431 sc->vge_camidx = 0;
432 }
433
434 static int
435 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
436 {
437 int i, error;
438
439 error = 0;
440
441 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
442 return ENOSPC;
443
444 /* Select the CAM data page. */
445 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
446 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
447
448 /* Set the filter entry we want to update and enable writing. */
449 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
450
451 /* Write the address to the CAM registers */
452 for (i = 0; i < ETHER_ADDR_LEN; i++)
453 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
454
455 /* Issue a write command. */
456 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
457
458 /* Wait for it to clear. */
459 for (i = 0; i < VGE_TIMEOUT; i++) {
460 DELAY(1);
461 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
462 break;
463 }
464
465 if (i == VGE_TIMEOUT) {
466 printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
467 error = EIO;
468 goto fail;
469 }
470
471 /* Select the CAM mask page. */
472 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
473 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
474
475 /* Set the mask bit that enables this filter. */
476 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx / 8),
477 1 << (sc->vge_camidx & 7));
478
479 sc->vge_camidx++;
480
481 fail:
482 /* Turn off access to CAM. */
483 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
484 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
485 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
486
487 return error;
488 }
489
490 /*
491 * Program the multicast filter. We use the 64-entry CAM filter
492 * for perfect filtering. If there are more than 64 multicast addresses,
493 * we use the hash filter instead.
494 */
495 static void
496 vge_setmulti(struct vge_softc *sc)
497 {
498 struct ifnet *ifp;
499 int error;
500 uint32_t h, hashes[2] = { 0, 0 };
501 struct ether_multi *enm;
502 struct ether_multistep step;
503
504 error = 0;
505 ifp = &sc->sc_ethercom.ec_if;
506
507 /* First, zot all the multicast entries. */
508 vge_cam_clear(sc);
509 CSR_WRITE_4(sc, VGE_MAR0, 0);
510 CSR_WRITE_4(sc, VGE_MAR1, 0);
511 ifp->if_flags &= ~IFF_ALLMULTI;
512
513 /*
514 * If the user wants allmulti or promisc mode, enable reception
515 * of all multicast frames.
516 */
517 if (ifp->if_flags & IFF_PROMISC) {
518 allmulti:
519 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
520 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
521 ifp->if_flags |= IFF_ALLMULTI;
522 return;
523 }
524
525 /* Now program new ones */
526 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
527 while (enm != NULL) {
528 /*
529 * If multicast range, fall back to ALLMULTI.
530 */
531 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
532 ETHER_ADDR_LEN) != 0)
533 goto allmulti;
534
535 error = vge_cam_set(sc, enm->enm_addrlo);
536 if (error)
537 break;
538
539 ETHER_NEXT_MULTI(step, enm);
540 }
541
542 /* If there were too many addresses, use the hash filter. */
543 if (error) {
544 vge_cam_clear(sc);
545
546 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
547 while (enm != NULL) {
548 /*
549 * If multicast range, fall back to ALLMULTI.
550 */
551 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
552 ETHER_ADDR_LEN) != 0)
553 goto allmulti;
554
555 h = ether_crc32_be(enm->enm_addrlo,
556 ETHER_ADDR_LEN) >> 26;
557 hashes[h >> 5] |= 1 << (h & 0x1f);
558
559 ETHER_NEXT_MULTI(step, enm);
560 }
561
562 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
563 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
564 }
565 }
566
567 static void
568 vge_reset(struct vge_softc *sc)
569 {
570 int i;
571
572 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
573
574 for (i = 0; i < VGE_TIMEOUT; i++) {
575 DELAY(5);
576 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
577 break;
578 }
579
580 if (i == VGE_TIMEOUT) {
581 printf("%s: soft reset timed out", sc->sc_dev.dv_xname);
582 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
583 DELAY(2000);
584 }
585
586 DELAY(5000);
587
588 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
589
590 for (i = 0; i < VGE_TIMEOUT; i++) {
591 DELAY(5);
592 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
593 break;
594 }
595
596 if (i == VGE_TIMEOUT) {
597 printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
598 return;
599 }
600
601 /*
602 * On some machines, the first data read from the EEPROM can be
603 * garbled, so do a dummy read here to work around it.
604 */
605 (void)vge_read_eeprom(sc, 0);
606
607 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
608 }
609
610 /*
611 * Probe for a VIA gigabit chip. Check the PCI vendor and device
612 * IDs against our list and return nonzero if we find a match.
613 */
614 static int
615 vge_probe(struct device *parent __unused, struct cfdata *match __unused,
616 void *aux)
617 {
618 struct pci_attach_args *pa = aux;
619
620 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
621 && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
622 return 1;
623
624 return 0;
625 }
626
627 static int
628 vge_allocmem(struct vge_softc *sc)
629 {
630 int error;
631 int nseg;
632 int i;
633 bus_dma_segment_t seg;
634
635 /*
636 * Allocate map for TX descriptor list.
637 */
638 error = bus_dmamap_create(sc->vge_dmat,
639 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT,
640 &sc->vge_ldata.vge_tx_list_map);
641 if (error) {
642 printf("%s: could not allocate TX dma list map\n",
643 sc->sc_dev.dv_xname);
644 return ENOMEM;
645 }
646
647 /*
648 * Allocate memory for TX descriptor list.
649 */
650
651 error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
652 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
653 if (error) {
654 printf("%s: could not allocate TX ring dma memory\n",
655 sc->sc_dev.dv_xname);
656 return ENOMEM;
657 }
658
659 /* Map the memory to kernel VA space */
660
661 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_TX_LIST_SZ,
662 (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
663 if (error) {
664 printf("%s: could not map TX ring dma memory\n",
665 sc->sc_dev.dv_xname);
666 return ENOMEM;
667 }
668
669 /* Load the map for the TX ring. */
670 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
671 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
672 if (error) {
673 printf("%s: could not load TX ring dma memory\n",
674 sc->sc_dev.dv_xname);
675 return ENOMEM;
676 }
677
678 /* Create DMA maps for TX buffers */
679
680 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
681 error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
682 VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
683 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
684 &sc->vge_ldata.vge_tx_dmamap[i]);
685 if (error) {
686 printf("%s: can't create DMA map for TX\n",
687 sc->sc_dev.dv_xname);
688 return ENOMEM;
689 }
690 }
691
692 /*
693 * Allocate map for RX descriptor list.
694 */
695 error = bus_dmamap_create(sc->vge_dmat,
696 VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT,
697 &sc->vge_ldata.vge_rx_list_map);
698 if (error) {
699 printf("%s: could not allocate RX dma list map\n",
700 sc->sc_dev.dv_xname);
701 return ENOMEM;
702 }
703
704 /* Allocate DMA'able memory for the RX ring */
705
706 error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
707 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
708 if (error)
709 return ENOMEM;
710
711 /* Map the memory to kernel VA space */
712
713 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_RX_LIST_SZ,
714 (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
715 if (error)
716 return ENOMEM;
717
718 /* Load the map for the RX ring. */
719 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
720 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
721 if (error) {
722 printf("%s: could not load RX ring dma memory\n",
723 sc->sc_dev.dv_xname);
724 return ENOMEM;
725 }
726
727 /* Create DMA maps for RX buffers */
728
729 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
730 error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
731 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
732 &sc->vge_ldata.vge_rx_dmamap[i]);
733 if (error) {
734 printf("%s: can't create DMA map for RX\n",
735 sc->sc_dev.dv_xname);
736 return ENOMEM;
737 }
738 }
739
740 return 0;
741 }
742
743 /*
744 * Attach the interface. Allocate softc structures, do ifmedia
745 * setup and ethernet/BPF attach.
746 */
747 static void
748 vge_attach(struct device *parent __unused, struct device *self, void *aux)
749 {
750 uint8_t *eaddr;
751 struct vge_softc *sc = (struct vge_softc *)self;
752 struct ifnet *ifp;
753 struct pci_attach_args *pa = aux;
754 pci_chipset_tag_t pc = pa->pa_pc;
755 const char *intrstr;
756 pci_intr_handle_t ih;
757 uint16_t val;
758
759 aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
760 PCI_REVISION(pa->pa_class));
761
762 /* Make sure bus-mastering is enabled */
763 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
764 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
765 PCI_COMMAND_MASTER_ENABLE);
766
767 /*
768 * Map control/status registers.
769 */
770 if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
771 &sc->vge_btag, &sc->vge_bhandle, NULL, NULL) != 0) {
772 aprint_error("%s: couldn't map memory\n", sc->sc_dev.dv_xname);
773 return;
774 }
775
776 /*
777 * Map and establish our interrupt.
778 */
779 if (pci_intr_map(pa, &ih)) {
780 aprint_error("%s: unable to map interrupt\n",
781 sc->sc_dev.dv_xname);
782 return;
783 }
784 intrstr = pci_intr_string(pc, ih);
785 sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
786 if (sc->vge_intrhand == NULL) {
787 printf("%s: unable to establish interrupt",
788 sc->sc_dev.dv_xname);
789 if (intrstr != NULL)
790 printf(" at %s", intrstr);
791 printf("\n");
792 return;
793 }
794 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
795
796 /* Reset the adapter. */
797 vge_reset(sc);
798
799 /*
800 * Get station address from the EEPROM.
801 */
802 eaddr = sc->vge_eaddr;
803 val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
804 eaddr[0] = val & 0xff;
805 eaddr[1] = val >> 8;
806 val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
807 eaddr[2] = val & 0xff;
808 eaddr[3] = val >> 8;
809 val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
810 eaddr[4] = val & 0xff;
811 eaddr[5] = val >> 8;
812
813 printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
814 ether_sprintf(eaddr));
815
816 /*
817 * Use the 32-bit DMA tag. The hardware supports 48-bit physical
818 * addresses, but we don't use them for now.
819 */
820 sc->vge_dmat = pa->pa_dmat;
821
822 if (vge_allocmem(sc))
823 return;
824
825 ifp = &sc->sc_ethercom.ec_if;
826 ifp->if_softc = sc;
827 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
828 ifp->if_mtu = ETHERMTU;
829 ifp->if_baudrate = IF_Gbps(1);
830 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
831 ifp->if_ioctl = vge_ioctl;
832 ifp->if_start = vge_start;
833
834 /*
835 * We can support 802.1Q VLAN-sized frames and jumbo
836 * Ethernet frames.
837 */
838 sc->sc_ethercom.ec_capabilities |=
839 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
840 ETHERCAP_VLAN_HWTAGGING;
841
842 /*
843 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
844 */
845 ifp->if_capabilities |=
846 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
847 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
848 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
849
850 #ifdef DEVICE_POLLING
851 #ifdef IFCAP_POLLING
852 ifp->if_capabilities |= IFCAP_POLLING;
853 #endif
854 #endif
855 ifp->if_watchdog = vge_watchdog;
856 ifp->if_init = vge_init;
857 IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
858
859 /*
860 * Initialize our media structures and probe the MII.
861 */
862 sc->sc_mii.mii_ifp = ifp;
863 sc->sc_mii.mii_readreg = vge_miibus_readreg;
864 sc->sc_mii.mii_writereg = vge_miibus_writereg;
865 sc->sc_mii.mii_statchg = vge_miibus_statchg;
866 ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
867 vge_ifmedia_sts);
868 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
869 MII_OFFSET_ANY, MIIF_DOPAUSE);
870 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
871 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
872 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
873 } else
874 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
875
876 /*
877 * Attach the interface.
878 */
879 if_attach(ifp);
880 ether_ifattach(ifp, eaddr);
881
882 callout_init(&sc->vge_timeout);
883 callout_setfunc(&sc->vge_timeout, vge_tick, sc);
884
885 /*
886 * Make sure the interface is shutdown during reboot.
887 */
888 if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
889 printf("%s: WARNING: unable to establish shutdown hook\n",
890 sc->sc_dev.dv_xname);
891 }
892 }
893
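/*
 * Attach an mbuf cluster to RX descriptor 'idx'.  A fresh cluster is
 * allocated when 'm' is NULL; otherwise the caller's mbuf is recycled.
 * Descriptors are handed back to the chip (OWN bit set) only in groups
 * of VGE_RXCHUNK, as the hardware requires (see below).
 */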
894 static int
895 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
896 {
897 struct vge_rx_desc *d;
898 struct mbuf *m_new;
899 bus_dmamap_t map;
900 int i;
901
902 m_new = NULL;
903 if (m == NULL) {
904 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
905 if (m_new == NULL)
906 return ENOBUFS;
907
908 MCLGET(m_new, M_DONTWAIT);
909 if ((m_new->m_flags & M_EXT) == 0) {
910 m_freem(m_new);
911 return ENOBUFS;
912 }
913
914 m = m_new;
915 } else
916 m->m_data = m->m_ext.ext_buf;
917
918
919 #ifndef __NO_STRICT_ALIGNMENT
920 /*
921 * This is part of an evil trick to deal with non-x86 platforms.
922 * The VIA chip requires RX buffers to be aligned on 32-bit
923 * boundaries, but that will hose non-x86 machines. To get around
924 * this, we leave some empty space at the start of each buffer
925 * and for non-x86 hosts, we copy the buffer back two bytes
926 * to achieve word alignment. This is slightly more efficient
927 * than allocating a new buffer, copying the contents, and
928 * discarding the old buffer.
929 */
930 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
931 m_adj(m, VGE_ETHER_ALIGN);
932 #else
933 m->m_len = m->m_pkthdr.len = MCLBYTES;
934 #endif
935 map = sc->vge_ldata.vge_rx_dmamap[idx];
936
937 if (bus_dmamap_load_mbuf(sc->vge_dmat, map, m, BUS_DMA_NOWAIT) != 0)
938 goto out;
939
940 d = &sc->vge_ldata.vge_rx_list[idx];
941
942 /* If this descriptor is still owned by the chip, bail. */
943
944 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
945 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
946 printf("%s: tried to map busy descriptor\n",
947 sc->sc_dev.dv_xname);
948 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
949 goto out;
950 }
951
952 d->vge_buflen =
953 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
954 d->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
955 d->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
956 d->vge_sts = 0;
957 d->vge_ctl = 0;
958 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
959
960 bus_dmamap_sync(sc->vge_dmat,
961 sc->vge_ldata.vge_rx_dmamap[idx],
962 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
963 BUS_DMASYNC_PREREAD);
964
965 /*
966 * Note: the manual fails to document the fact that for
967 * proper operation, the driver needs to replenish the RX
968 * DMA ring 4 descriptors at a time (rather than one at a
969 * time, like most chips). We can allocate the new buffers
970 * but we should not set the OWN bits until we're ready
971 * to hand back 4 of them in one shot.
972 */
973
974 #define VGE_RXCHUNK 4
975 sc->vge_rx_consumed++;
976 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
977 for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
978 sc->vge_ldata.vge_rx_list[i].vge_sts |=
979 htole32(VGE_RDSTS_OWN);
980 VGE_RXDESCSYNC(sc, i,
981 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
982 }
983 sc->vge_rx_consumed = 0;
984 }
985
986 sc->vge_ldata.vge_rx_mbuf[idx] = m;
987
988 return 0;
989 out:
990 if (m_new != NULL)
991 m_freem(m_new);
992 return ENOMEM;
993 }
994
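/*
 * Initialize the TX descriptor ring: zero the descriptors and the
 * mbuf pointer array, and reset the producer/consumer indexes and
 * the free-descriptor count.
 */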
995 static int
996 vge_tx_list_init(struct vge_softc *sc)
997 {
998
999 memset((char *)sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
1000 bus_dmamap_sync(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
1001 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
1002 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1003
1004 memset((char *)&sc->vge_ldata.vge_tx_mbuf, 0,
1005 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1006
1007 sc->vge_ldata.vge_tx_prodidx = 0;
1008 sc->vge_ldata.vge_tx_considx = 0;
1009 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1010
1011 return 0;
1012 }
1013
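/*
 * Initialize the RX descriptor ring: zero the descriptors and the
 * mbuf pointer array, then populate every slot with an mbuf cluster
 * via vge_newbuf().
 */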
1014 static int
1015 vge_rx_list_init(struct vge_softc *sc)
1016 {
1017 int i;
1018
1019 memset((char *)sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
1020 memset((char *)&sc->vge_ldata.vge_rx_mbuf, 0,
1021 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1022
1023 sc->vge_rx_consumed = 0;
1024
1025 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1026 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1027 return (ENOBUFS);
1028 }
1029
1030 sc->vge_ldata.vge_rx_prodidx = 0;
1031 sc->vge_rx_consumed = 0;
1032 sc->vge_head = sc->vge_tail = NULL;
1033
1034 return 0;
1035 }
1036
1037 #ifndef __NO_STRICT_ALIGNMENT
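/*
 * Shift the received frame back by two bytes (ETHER_ALIGN), one
 * 16-bit word at a time, so that the IP header following the
 * 14-byte Ethernet header ends up 32-bit aligned.
 */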
1038 static inline void
1039 vge_fixup_rx(struct mbuf *m)
1040 {
1041 int i;
1042 uint16_t *src, *dst;
1043
1044 src = mtod(m, uint16_t *);
1045 dst = src - 1;
1046
1047 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1048 *dst++ = *src++;
1049
1050 m->m_data -= ETHER_ALIGN;
1051 }
1052 #endif
1053
1054 /*
1055 * RX handler. We support the reception of jumbo frames that have
1056 * been fragmented across multiple 2K mbuf cluster buffers.
1057 */
1058 static void
1059 vge_rxeof(struct vge_softc *sc)
1060 {
1061 struct mbuf *m;
1062 struct ifnet *ifp;
1063 int idx, total_len, lim;
1064 struct vge_rx_desc *cur_rx;
1065 uint32_t rxstat, rxctl;
1066
1067 VGE_LOCK_ASSERT(sc);
1068 ifp = &sc->sc_ethercom.ec_if;
1069 idx = sc->vge_ldata.vge_rx_prodidx;
1070 lim = 0;
1071
1072 /* Invalidate the descriptor memory */
1073
1074 for (;;) {
1075 cur_rx = &sc->vge_ldata.vge_rx_list[idx];
1076
1077 VGE_RXDESCSYNC(sc, idx,
1078 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1079 rxstat = le32toh(cur_rx->vge_sts);
1080 if ((rxstat & VGE_RDSTS_OWN) != 0) {
1081 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1082 break;
1083 }
1084
1085 #ifdef DEVICE_POLLING
1086 if (ifp->if_flags & IFF_POLLING) {
1087 if (sc->rxcycles <= 0)
1088 break;
1089 sc->rxcycles--;
1090 }
1091 #endif /* DEVICE_POLLING */
1092
1093 m = sc->vge_ldata.vge_rx_mbuf[idx];
1094 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1095 rxctl = le32toh(cur_rx->vge_ctl);
1096
1097 /* Invalidate the RX mbuf and unload its map */
1098
1099 bus_dmamap_sync(sc->vge_dmat,
1100 sc->vge_ldata.vge_rx_dmamap[idx],
1101 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
1102 BUS_DMASYNC_POSTREAD);
1103 bus_dmamap_unload(sc->vge_dmat,
1104 sc->vge_ldata.vge_rx_dmamap[idx]);
1105
1106 /*
1107 * If the 'start of frame' bit is set, this indicates
1108 * either the first fragment in a multi-fragment receive,
1109 * or an intermediate fragment. Either way, we want to
1110 * accumulate the buffers.
1111 */
1112 if (rxstat & VGE_RXPKT_SOF) {
1113 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1114 if (sc->vge_head == NULL)
1115 sc->vge_head = sc->vge_tail = m;
1116 else {
1117 m->m_flags &= ~M_PKTHDR;
1118 sc->vge_tail->m_next = m;
1119 sc->vge_tail = m;
1120 }
1121 vge_newbuf(sc, idx, NULL);
1122 VGE_RX_DESC_INC(idx);
1123 continue;
1124 }
1125
1126 /*
1127 * Bad/error frames will have the RXOK bit cleared.
1128 * However, there's one error case we want to allow:
1129 * if a VLAN tagged frame arrives and the chip can't
1130 * match it against the CAM filter, it considers this
1131 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1132 * We don't want to drop the frame though: our VLAN
1133 * filtering is done in software.
1134 */
1135 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1136 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1137 ifp->if_ierrors++;
1138 /*
1139 * If this is part of a multi-fragment packet,
1140 * discard all the pieces.
1141 */
1142 if (sc->vge_head != NULL) {
1143 m_freem(sc->vge_head);
1144 sc->vge_head = sc->vge_tail = NULL;
1145 }
1146 vge_newbuf(sc, idx, m);
1147 VGE_RX_DESC_INC(idx);
1148 continue;
1149 }
1150
1151 /*
1152 * If allocating a replacement mbuf fails,
1153 * reload the current one.
1154 */
1155
1156 if (vge_newbuf(sc, idx, NULL)) {
1157 ifp->if_ierrors++;
1158 if (sc->vge_head != NULL) {
1159 m_freem(sc->vge_head);
1160 sc->vge_head = sc->vge_tail = NULL;
1161 }
1162 vge_newbuf(sc, idx, m);
1163 VGE_RX_DESC_INC(idx);
1164 continue;
1165 }
1166
1167 VGE_RX_DESC_INC(idx);
1168
1169 if (sc->vge_head != NULL) {
1170 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1171 /*
1172 * Special case: if there's 4 bytes or less
1173 * in this buffer, the mbuf can be discarded:
1174 * the last 4 bytes is the CRC, which we don't
1175 * care about anyway.
1176 */
1177 if (m->m_len <= ETHER_CRC_LEN) {
1178 sc->vge_tail->m_len -=
1179 (ETHER_CRC_LEN - m->m_len);
1180 m_freem(m);
1181 } else {
1182 m->m_len -= ETHER_CRC_LEN;
1183 m->m_flags &= ~M_PKTHDR;
1184 sc->vge_tail->m_next = m;
1185 }
1186 m = sc->vge_head;
1187 sc->vge_head = sc->vge_tail = NULL;
1188 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1189 } else
1190 m->m_pkthdr.len = m->m_len =
1191 (total_len - ETHER_CRC_LEN);
1192
1193 #ifndef __NO_STRICT_ALIGNMENT
1194 vge_fixup_rx(m);
1195 #endif
1196 ifp->if_ipackets++;
1197 m->m_pkthdr.rcvif = ifp;
1198
1199 /* Do RX checksumming if enabled */
1200 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1201
1202 /* Check IP header checksum */
1203 if (rxctl & VGE_RDCTL_IPPKT)
1204 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1205 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1206 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1207 }
1208
1209 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1210 /* Check TCP checksum */
1211 if (rxctl & VGE_RDCTL_TCPPKT)
1212 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1213
1214 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1215 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1216 }
1217
1218 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1219 /* Check UDP checksum */
1220 if (rxctl & VGE_RDCTL_UDPPKT)
1221 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1222
1223 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1224 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1225 }
1226
1227 if (rxstat & VGE_RDSTS_VTAG)
1228 VLAN_INPUT_TAG(ifp, m,
1229 ntohs((rxctl & VGE_RDCTL_VLANID)), continue);
1230
1231 #if NBPFILTER > 0
1232 /*
1233 * Handle BPF listeners.
1234 */
1235 if (ifp->if_bpf)
1236 bpf_mtap(ifp->if_bpf, m);
1237 #endif
1238
1239 VGE_UNLOCK(sc);
1240 (*ifp->if_input)(ifp, m);
1241 VGE_LOCK(sc);
1242
1243 lim++;
1244 if (lim == VGE_RX_DESC_CNT)
1245 break;
1246
1247 }
1248
1249 sc->vge_ldata.vge_rx_prodidx = idx;
1250 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1251 }
1252
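/*
 * Transmit completion handler: walk the ring from the consumer index,
 * free the mbufs and DMA maps of descriptors the chip has given back,
 * and update the free-descriptor count and interface statistics.
 */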
1253 static void
1254 vge_txeof(struct vge_softc *sc)
1255 {
1256 struct ifnet *ifp;
1257 uint32_t txstat;
1258 int idx;
1259
1260 ifp = &sc->sc_ethercom.ec_if;
1261 idx = sc->vge_ldata.vge_tx_considx;
1262
1263 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1264 VGE_TXDESCSYNC(sc, idx,
1265 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1266
1267 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1268 if (txstat & VGE_TDSTS_OWN) {
1269 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1270 break;
1271 }
1272
1273 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1274 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1275 bus_dmamap_unload(sc->vge_dmat,
1276 sc->vge_ldata.vge_tx_dmamap[idx]);
1277 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1278 ifp->if_collisions++;
1279 if (txstat & VGE_TDSTS_TXERR)
1280 ifp->if_oerrors++;
1281 else
1282 ifp->if_opackets++;
1283
1284 sc->vge_ldata.vge_tx_free++;
1285 VGE_TX_DESC_INC(idx);
1286 }
1287
1288 /* No changes made to the TX ring, so no flush needed */
1289
1290 if (idx != sc->vge_ldata.vge_tx_considx) {
1291 sc->vge_ldata.vge_tx_considx = idx;
1292 ifp->if_flags &= ~IFF_OACTIVE;
1293 ifp->if_timer = 0;
1294 }
1295
1296 /*
1297 * If not all descriptors have been reaped yet,
1298 * reload the timer so that we will eventually get another
1299 * interrupt that will cause us to re-enter this routine.
1300 * This is done in case the transmitter has gone idle.
1301 */
1302 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1303 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1304 }
1305 }
1306
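/*
 * Once-a-second timer: watch the MII for link state changes and
 * kick the transmitter when the link comes up with packets queued.
 */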
1307 static void
1308 vge_tick(void *xsc)
1309 {
1310 struct vge_softc *sc;
1311 struct ifnet *ifp;
1312 struct mii_data *mii;
1313 int s;
1314
1315 sc = xsc;
1316 ifp = &sc->sc_ethercom.ec_if;
1317 mii = &sc->sc_mii;
1318
1319 s = splnet();
1320
1321 VGE_LOCK(sc);
1322
1323 callout_schedule(&sc->vge_timeout, hz);
1324
1325 mii_tick(mii);
1326 if (sc->vge_link) {
1327 if (!(mii->mii_media_status & IFM_ACTIVE))
1328 sc->vge_link = 0;
1329 } else {
1330 if (mii->mii_media_status & IFM_ACTIVE &&
1331 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1332 sc->vge_link = 1;
1333 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1334 vge_start(ifp);
1335 }
1336 }
1337
1338 VGE_UNLOCK(sc);
1339
1340 splx(s);
1341 }
1342
1343 #ifdef DEVICE_POLLING
1344 static void
1345 vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1346 {
1347 struct vge_softc *sc = ifp->if_softc;
1348
1349 VGE_LOCK(sc);
1350 #ifdef IFCAP_POLLING
1351 if (!(ifp->if_capenable & IFCAP_POLLING)) {
1352 ether_poll_deregister(ifp);
1353 cmd = POLL_DEREGISTER;
1354 }
1355 #endif
1356 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1357 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1358 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
1359 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1360 goto done;
1361 }
1362
1363 sc->rxcycles = count;
1364 vge_rxeof(sc);
1365 vge_txeof(sc);
1366
1367 #if __FreeBSD_version < 502114
1368 if (ifp->if_snd.ifq_head != NULL)
1369 #else
1370 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1371 #endif
1372 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1373
1374 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1375 uint32_t status;
1376 status = CSR_READ_4(sc, VGE_ISR);
1377 if (status == 0xFFFFFFFF)
1378 goto done;
1379 if (status)
1380 CSR_WRITE_4(sc, VGE_ISR, status);
1381
1382 /*
1383 * XXX check behaviour on receiver stalls.
1384 */
1385
1386 if (status & VGE_ISR_TXDMA_STALL ||
1387 status & VGE_ISR_RXDMA_STALL)
1388 vge_init(ifp);
1389
1390 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1391 vge_rxeof(sc);
1392 ifp->if_ierrors++;
1393 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1394 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1395 }
1396 }
1397 done:
1398 VGE_UNLOCK(sc);
1399 }
1400 #endif /* DEVICE_POLLING */
1401
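/*
 * Interrupt handler: mask chip interrupts, read and acknowledge the
 * interrupt status, service RX/TX completions, receive overruns and
 * DMA stalls, then unmask interrupts and restart transmission if
 * packets are waiting.
 */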
1402 static int
1403 vge_intr(void *arg)
1404 {
1405 struct vge_softc *sc;
1406 struct ifnet *ifp;
1407 uint32_t status;
1408 int claim;
1409
1410 sc = arg;
1411 claim = 0;
1412 if (sc->suspended) {
1413 return claim;
1414 }
1415
1416 ifp = &sc->sc_ethercom.ec_if;
1417
1418 VGE_LOCK(sc);
1419
1420 if (!(ifp->if_flags & IFF_UP)) {
1421 VGE_UNLOCK(sc);
1422 return claim;
1423 }
1424
1425 #ifdef DEVICE_POLLING
1426 if (ifp->if_flags & IFF_POLLING)
1427 goto done;
1428 if (
1429 #ifdef IFCAP_POLLING
1430 (ifp->if_capenable & IFCAP_POLLING) &&
1431 #endif
1432 ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
1433 CSR_WRITE_4(sc, VGE_IMR, 0);
1434 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1435 vge_poll(ifp, 0, 1);
1436 goto done;
1437 }
1438
1439 #endif /* DEVICE_POLLING */
1440
1441 /* Disable interrupts */
1442 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1443
1444 for (;;) {
1445
1446 status = CSR_READ_4(sc, VGE_ISR);
1447 /* If the card has gone away the read returns 0xffffffff. */
1448 if (status == 0xFFFFFFFF)
1449 break;
1450
1451 if (status) {
1452 claim = 1;
1453 CSR_WRITE_4(sc, VGE_ISR, status);
1454 }
1455
1456 if ((status & VGE_INTRS) == 0)
1457 break;
1458
1459 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1460 vge_rxeof(sc);
1461
1462 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1463 vge_rxeof(sc);
1464 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1465 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1466 }
1467
1468 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1469 vge_txeof(sc);
1470
1471 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1472 vge_init(ifp);
1473
1474 if (status & VGE_ISR_LINKSTS)
1475 vge_tick(sc);
1476 }
1477
1478 /* Re-enable interrupts */
1479 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1480
1481 #ifdef DEVICE_POLLING
1482 done:
1483 #endif
1484 VGE_UNLOCK(sc);
1485
1486 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1487 vge_start(ifp);
1488
1489 return claim;
1490 }
1491
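/*
 * Map an mbuf chain into TX descriptor 'idx': load it into the DMA
 * map (defragmenting first if it needs more segments than the
 * hardware supports), fill in the fragment pointers, pad short
 * frames, set the checksum offload and VLAN bits, and finally hand
 * the descriptor to the chip by setting the OWN bit.
 */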
1492 static int
1493 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1494 {
1495 struct vge_tx_desc *d;
1496 struct vge_tx_frag *f;
1497 struct mbuf *m_new;
1498 bus_dmamap_t map;
1499 int seg, error, flags;
1500 struct m_tag *mtag;
1501 size_t sz;
1502
1503 d = &sc->vge_ldata.vge_tx_list[idx];
1504
1505 /* If this descriptor is still owned by the chip, bail. */
1506 if (sc->vge_ldata.vge_tx_free <= 2) {
1507 VGE_TXDESCSYNC(sc, idx,
1508 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1509 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
1510 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1511 return ENOBUFS;
1512 }
1513 }
1514
1515 map = sc->vge_ldata.vge_tx_dmamap[idx];
1516 error = bus_dmamap_load_mbuf(sc->vge_dmat, map, m_head, BUS_DMA_NOWAIT);
1517
1518 /* If too many segments to map, coalesce */
1519 if (error == EFBIG) {
1520 m_new = m_defrag(m_head, M_DONTWAIT);
1521 if (m_new == NULL)
1522 return (error);
1523
1524 error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
1525 m_new, BUS_DMA_NOWAIT);
1526 if (error) {
1527 m_freem(m_new);
1528 return error;
1529 }
1530
1531 m_head = m_new;
1532 } else if (error)
1533 return error;
1534
1535 for (seg = 0, f = &d->vge_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1536 f->vge_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1537 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[seg].ds_addr));
1538 f->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[seg].ds_addr));
1539 }
1540
1541 /* Argh. This chip does not autopad short frames */
1542
1543 sz = m_head->m_pkthdr.len;
1544 if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1545 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
1546 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
1547 f->vge_addrhi =
1548 htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
1549 sz = VGE_MIN_FRAMELEN;
1550 seg++;
1551 }
1552 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1553
1554 /*
1555 * When telling the chip how many segments there are, we
1556 * must use nsegs + 1 instead of just nsegs. Darned if I
1557 * know why.
1558 */
1559 seg++;
1560
1561 flags = 0;
1562 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
1563 flags |= VGE_TDCTL_IPCSUM;
1564 if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1565 flags |= VGE_TDCTL_TCPCSUM;
1566 if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1567 flags |= VGE_TDCTL_UDPCSUM;
1568 d->vge_sts = htole32(sz << 16);
1569 d->vge_ctl = htole32(flags | (seg << 28) | VGE_TD_LS_NORM);
1570
1571 if (sz > ETHERMTU + ETHER_HDR_LEN)
1572 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
1573
1574 bus_dmamap_sync(sc->vge_dmat, map, 0, map->dm_mapsize,
1575 BUS_DMASYNC_PREWRITE);
1576
1577 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1578 sc->vge_ldata.vge_tx_free--;
1579
1580 /*
1581 * Set up hardware VLAN tagging.
1582 */
1583
1584 mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
1585 if (mtag != NULL)
1586 d->vge_ctl |=
1587 htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);
1588
1589 d->vge_sts |= htole32(VGE_TDSTS_OWN);
1590
1591 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1592
1593 return 0;
1594 }
1595
1596 /*
1597 * Main transmit routine.
1598 */
1599
1600 static void
1601 vge_start(struct ifnet *ifp)
1602 {
1603 struct vge_softc *sc;
1604 struct mbuf *m_head;
1605 int idx, pidx, error;
1606
1607 sc = ifp->if_softc;
1608 VGE_LOCK(sc);
1609
1610 if (!sc->vge_link ||
1611 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1612 VGE_UNLOCK(sc);
1613 return;
1614 }
1615
1616 m_head = NULL;
1617 idx = sc->vge_ldata.vge_tx_prodidx;
1618
1619 pidx = idx - 1;
1620 if (pidx < 0)
1621 pidx = VGE_TX_DESC_CNT - 1;
1622
1623 /*
1624 * Loop through the send queue, setting up transmit descriptors
1625 * until we drain the queue, or use up all available transmit
1626 * descriptors.
1627 */
1628 for (;;) {
1629 /* Grab a packet off the queue. */
1630 IFQ_POLL(&ifp->if_snd, m_head);
1631 if (m_head == NULL)
1632 break;
1633
1634 if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
1635 /*
1636 * Slot already used, stop for now.
1637 */
1638 ifp->if_flags |= IFF_OACTIVE;
1639 break;
1640 }
1641
1642 if ((error = vge_encap(sc, m_head, idx))) {
1643 if (error == EFBIG) {
1644 printf("%s: Tx packet consumes too many "
1645 "DMA segments, dropping...\n",
1646 sc->sc_dev.dv_xname);
1647 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1648 m_freem(m_head);
1649 continue;
1650 }
1651
1652 /*
1653 * Short on resources, just stop for now.
1654 */
1655 if (error == ENOBUFS)
1656 ifp->if_flags |= IFF_OACTIVE;
1657 break;
1658 }
1659
1660 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1661
1662 /*
1663 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1664 */
1665
1666 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1667 htole16(VGE_TXDESC_Q);
1668 VGE_TXDESCSYNC(sc, pidx,
1669 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1670
1671 if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
1672 m_freem(m_head);
1673 m_head = sc->vge_ldata.vge_tx_mbuf[idx];
1674 }
1675
1676 pidx = idx;
1677 VGE_TX_DESC_INC(idx);
1678
1679 /*
1680 * If there's a BPF listener, bounce a copy of this frame
1681 * to him.
1682 */
1683 #if NBPFILTER > 0
1684 if (ifp->if_bpf)
1685 bpf_mtap(ifp->if_bpf, m_head);
1686 #endif
1687 }
1688
1689 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1690 VGE_UNLOCK(sc);
1691 return;
1692 }
1693
1694 /* Issue a transmit command. */
1695 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1696
1697 sc->vge_ldata.vge_tx_prodidx = idx;
1698
1699 /*
1700 * Use the countdown timer for interrupt moderation.
1701 * 'TX done' interrupts are disabled. Instead, we reset the
1702 * countdown timer, which will begin counting until it hits
1703 * the value in the SSTIMER register, and then trigger an
1704 * interrupt. Each time we set the TIMER0_ENABLE bit,
1705 * the timer count is reloaded. Only when the transmitter
1706 * is idle will the timer hit 0 and an interrupt fire.
1707 */
1708 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1709
1710 VGE_UNLOCK(sc);
1711
1712 /*
1713 * Set a timeout in case the chip goes out to lunch.
1714 */
1715 ifp->if_timer = 5;
1716 }
1717
1718 static int
1719 vge_init(struct ifnet *ifp)
1720 {
1721 struct vge_softc *sc;
1722 int i;
1723
1724 sc = ifp->if_softc;
1725
1726 VGE_LOCK(sc);
1727
1728 /*
1729 * Cancel pending I/O and free all RX/TX buffers.
1730 */
1731 vge_stop(sc);
1732 vge_reset(sc);
1733
1734 /*
1735 * Initialize the RX and TX descriptors and mbufs.
1736 */
1737
1738 vge_rx_list_init(sc);
1739 vge_tx_list_init(sc);
1740
1741 /* Set our station address */
1742 for (i = 0; i < ETHER_ADDR_LEN; i++)
1743 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);
1744
1745 /*
1746 * Set receive FIFO threshold. Also allow transmission and
1747 * reception of VLAN tagged frames.
1748 */
1749 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1750 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1751
1752 /* Set DMA burst length */
1753 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1754 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1755
1756 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1757
1758 /* Set collision backoff algorithm */
1759 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1760 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1761 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1762
1763 /* Disable LPSEL field in priority resolution */
1764 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1765
1766 /*
1767 * Load the addresses of the DMA queues into the chip.
1768 * Note that we only use one transmit queue.
1769 */
1770
1771 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1772 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr));
1773 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1774
1775 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1776 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr));
1777 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1778 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1779
1780 /* Enable and wake up the RX descriptor queue */
1781 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1782 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1783
1784 /* Enable the TX descriptor queue */
1785 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1786
1787 /* Set up the receive filter -- allow large frames for VLANs. */
1788 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1789
1790 /* If we want promiscuous mode, set the allframes bit. */
1791 if (ifp->if_flags & IFF_PROMISC) {
1792 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1793 }
1794
1795 /* Set capture broadcast bit to capture broadcast frames. */
1796 if (ifp->if_flags & IFF_BROADCAST) {
1797 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1798 }
1799
1800 /* Set multicast bit to capture multicast frames. */
1801 if (ifp->if_flags & IFF_MULTICAST) {
1802 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1803 }
1804
1805 /* Init the cam filter. */
1806 vge_cam_clear(sc);
1807
1808 /* Init the multicast filter. */
1809 vge_setmulti(sc);
1810
1811 /* Enable flow control */
1812
1813 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1814
1815 /* Enable jumbo frame reception (if desired) */
1816
1817 /* Start the MAC. */
1818 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1819 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1820 CSR_WRITE_1(sc, VGE_CRS0,
1821 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1822
1823 /*
1824 * Configure one-shot timer for microsecond
1825 * resolution and load it for 500 usecs.
1826 */
1827 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1828 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1829
1830 /*
1831 * Configure interrupt moderation for receive. Enable
1832 * the holdoff counter and load it, and set the RX
1833 * suppression count to the number of descriptors we
1834 * want to allow before triggering an interrupt.
1835 * The holdoff timer is in units of 20 usecs.
1836 */
1837
1838 #ifdef notyet
1839 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1840 /* Select the interrupt holdoff timer page. */
1841 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1842 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1843 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1844
1845 /* Enable use of the holdoff timer. */
1846 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1847 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1848
1849 /* Select the RX suppression threshold page. */
1850 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1851 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1852 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1853
1854 /* Restore the page select bits. */
1855 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1856 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1857 #endif
1858
1859 #ifdef DEVICE_POLLING
1860 /*
1861 * Disable interrupts if we are polling.
1862 */
1863 if (ifp->if_flags & IFF_POLLING) {
1864 CSR_WRITE_4(sc, VGE_IMR, 0);
1865 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1866 } else /* otherwise ... */
1867 #endif /* DEVICE_POLLING */
1868 {
1869 /*
1870 * Enable interrupts.
1871 */
1872 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1873 CSR_WRITE_4(sc, VGE_ISR, 0);
1874 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1875 }
1876
1877 mii_mediachg(&sc->sc_mii);
1878
1879 ifp->if_flags |= IFF_RUNNING;
1880 ifp->if_flags &= ~IFF_OACTIVE;
1881
1882 sc->vge_if_flags = 0;
1883 sc->vge_link = 0;
1884
1885 VGE_UNLOCK(sc);
1886
1887 callout_schedule(&sc->vge_timeout, hz);
1888
1889 return 0;
1890 }
1891
1892 /*
1893 * Set media options.
1894 */
1895 static int
1896 vge_ifmedia_upd(struct ifnet *ifp)
1897 {
1898 struct vge_softc *sc;
1899
1900 sc = ifp->if_softc;
1901 mii_mediachg(&sc->sc_mii);
1902
1903 return 0;
1904 }
1905
1906 /*
1907 * Report current media status.
1908 */
1909 static void
1910 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1911 {
1912 struct vge_softc *sc;
1913 struct mii_data *mii;
1914
1915 sc = ifp->if_softc;
1916 mii = &sc->sc_mii;
1917
1918 mii_pollstat(mii);
1919 ifmr->ifm_active = mii->mii_media_active;
1920 ifmr->ifm_status = mii->mii_media_status;
1921 }
1922
1923 static void
1924 vge_miibus_statchg(struct device *self)
1925 {
1926 struct vge_softc *sc;
1927 struct mii_data *mii;
1928 struct ifmedia_entry *ife;
1929
1930 sc = (void *)self;
1931 mii = &sc->sc_mii;
1932 ife = mii->mii_media.ifm_cur;
1933 /*
1934 * If the user manually selects a media mode, we need to turn
1935 * on the forced MAC mode bit in the DIAGCTL register. If the
1936 * user happens to choose a full duplex mode, we also need to
1937 * set the 'force full duplex' bit. This applies only to
1938 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1939 * mode is disabled, and in 1000baseT mode, full duplex is
1940 * always implied, so we turn on the forced mode bit but leave
1941 * the FDX bit cleared.
1942 */
1943
1944 switch (IFM_SUBTYPE(ife->ifm_media)) {
1945 case IFM_AUTO:
1946 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1947 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1948 break;
1949 case IFM_1000_T:
1950 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1951 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1952 break;
1953 case IFM_100_TX:
1954 case IFM_10_T:
1955 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1956 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
1957 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1958 } else {
1959 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1960 }
1961 break;
1962 default:
1963 printf("%s: unknown media type: %x\n",
1964 sc->sc_dev.dv_xname,
1965 IFM_SUBTYPE(ife->ifm_media));
1966 break;
1967 }
1968 }
1969
1970 static int
1971 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1972 {
1973 struct vge_softc *sc;
1974 struct ifreq *ifr;
1975 struct mii_data *mii;
1976 int s, error;
1977
1978 sc = ifp->if_softc;
1979 ifr = (struct ifreq *)data;
1980 error = 0;
1981
1982 s = splnet();
1983
1984 switch (command) {
1985 case SIOCSIFMTU:
1986 if (ifr->ifr_mtu > VGE_JUMBO_MTU)
1987 error = EINVAL;
1988 else ifp->if_mtu = ifr->ifr_mtu;
1989 break;
1990 case SIOCSIFFLAGS:
1991 if (ifp->if_flags & IFF_UP) {
1992 if (ifp->if_flags & IFF_RUNNING &&
1993 ifp->if_flags & IFF_PROMISC &&
1994 !(sc->vge_if_flags & IFF_PROMISC)) {
1995 CSR_SETBIT_1(sc, VGE_RXCTL,
1996 VGE_RXCTL_RX_PROMISC);
1997 vge_setmulti(sc);
1998 } else if (ifp->if_flags & IFF_RUNNING &&
1999 !(ifp->if_flags & IFF_PROMISC) &&
2000 sc->vge_if_flags & IFF_PROMISC) {
2001 CSR_CLRBIT_1(sc, VGE_RXCTL,
2002 VGE_RXCTL_RX_PROMISC);
2003 vge_setmulti(sc);
2004 } else
2005 vge_init(ifp);
2006 } else {
2007 if (ifp->if_flags & IFF_RUNNING)
2008 vge_stop(sc);
2009 }
2010 sc->vge_if_flags = ifp->if_flags;
2011 break;
2012 case SIOCADDMULTI:
2013 case SIOCDELMULTI:
2014 error = (command == SIOCADDMULTI) ?
2015 ether_addmulti(ifr, &sc->sc_ethercom) :
2016 ether_delmulti(ifr, &sc->sc_ethercom);
2017
2018 if (error == ENETRESET) {
2019 /*
2020 * Multicast list has changed; set the hardware filter
2021 * accordingly.
2022 */
2023 if (ifp->if_flags & IFF_RUNNING)
2024 vge_setmulti(sc);
2025 error = 0;
2026 }
2027 break;
2028 case SIOCGIFMEDIA:
2029 case SIOCSIFMEDIA:
2030 mii = &sc->sc_mii;
2031 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2032 break;
2033 default:
2034 error = ether_ioctl(ifp, command, data);
2035 break;
2036 }
2037
2038 splx(s);
2039 return error;
2040 }
2041
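/*
 * Watchdog timeout: the chip failed to complete transmission within
 * the timeout period; reclaim what has completed and reinitialize
 * the interface.
 */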
2042 static void
2043 vge_watchdog(struct ifnet *ifp)
2044 {
2045 struct vge_softc *sc;
2046
2047 sc = ifp->if_softc;
2048 VGE_LOCK(sc);
2049 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2050 ifp->if_oerrors++;
2051
2052 vge_txeof(sc);
2053 vge_rxeof(sc);
2054
2055 vge_init(ifp);
2056
2057 VGE_UNLOCK(sc);
2058 }
2059
2060 /*
2061 * Stop the adapter and free any mbufs allocated to the
2062 * RX and TX lists.
2063 */
2064 static void
2065 vge_stop(struct vge_softc *sc)
2066 {
2067 int i;
2068 struct ifnet *ifp;
2069
2070 ifp = &sc->sc_ethercom.ec_if;
2071
2072 VGE_LOCK(sc);
2073 ifp->if_timer = 0;
2074
2075 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2076 #ifdef DEVICE_POLLING
2077 ether_poll_deregister(ifp);
2078 #endif /* DEVICE_POLLING */
2079
2080 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2081 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2082 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2083 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2084 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2085 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2086
2087 if (sc->vge_head != NULL) {
2088 m_freem(sc->vge_head);
2089 sc->vge_head = sc->vge_tail = NULL;
2090 }
2091
2092 /* Free the TX list buffers. */
2093
2094 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2095 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2096 bus_dmamap_unload(sc->vge_dmat,
2097 sc->vge_ldata.vge_tx_dmamap[i]);
2098 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2099 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2100 }
2101 }
2102
2103 /* Free the RX list buffers. */
2104
2105 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2106 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2107 bus_dmamap_unload(sc->vge_dmat,
2108 sc->vge_ldata.vge_rx_dmamap[i]);
2109 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2110 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2111 }
2112 }
2113
2114 VGE_UNLOCK(sc);
2115 }
2116
2117 #if VGE_POWER_MANAGEMENT
2118 /*
2119 * Device suspend routine. Stop the interface and save some PCI
2120 * settings in case the BIOS doesn't restore them properly on
2121 * resume.
2122 */
2123 static int
2124 vge_suspend(struct device *dev)
2125 {
2126 struct vge_softc *sc;
2127 int i;
2128
2129 sc = device_get_softc(dev);
2130
2131 vge_stop(sc);
2132
2133 for (i = 0; i < 5; i++)
2134 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2135 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2136 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2137 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2138 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2139
2140 sc->suspended = 1;
2141
2142 return 0;
2143 }
2144
2145 /*
2146 * Device resume routine. Restore some PCI settings in case the BIOS
2147 * doesn't, re-enable busmastering, and restart the interface if
2148 * appropriate.
2149 */
2150 static int
2151 vge_resume(struct device *dev)
2152 {
2153 struct vge_softc *sc;
2154 struct ifnet *ifp;
2155 int i;
2156
2157 sc = (void *)dev;
2158 ifp = &sc->sc_ethercom.ec_if;
2159
2160 /* better way to do this? */
2161 for (i = 0; i < 5; i++)
2162 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2163 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2164 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2165 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2166 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2167
2168 /* reenable busmastering */
2169 pci_enable_busmaster(dev);
2170 pci_enable_io(dev, SYS_RES_MEMORY);
2171
2172 /* reinitialize interface if necessary */
2173 if (ifp->if_flags & IFF_UP)
2174 vge_init(sc);
2175
2176 sc->suspended = 0;
2177
2178 return 0;
2179 }
2180 #endif
2181
2182 /*
2183 * Stop all chip I/O so that the kernel's probe routines don't
2184 * get confused by errant DMAs when rebooting.
2185 */
2186 static void
2187 vge_shutdown(void *arg)
2188 {
2189 struct vge_softc *sc;
2190
2191 sc = arg;
2192 vge_stop(sc);
2193 }
2194