1 /* $NetBSD: if_vge.c,v 1.19 2006/10/17 09:55:12 tsutsui Exp $ */
2
3 /*-
4 * Copyright (c) 2004
5 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.19 2006/10/17 09:55:12 tsutsui Exp $");
39
40 /*
41 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
42 *
43 * Written by Bill Paul <wpaul (at) windriver.com>
44 * Senior Networking Software Engineer
45 * Wind River Systems
46 */
47
48 /*
49 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
50 * combines a tri-speed ethernet MAC and PHY, with the following
51 * features:
52 *
53 * o Jumbo frame support up to 16K
54 * o Transmit and receive flow control
55 * o IPv4 checksum offload
56 * o VLAN tag insertion and stripping
57 * o TCP large send
58 * o 64-bit multicast hash table filter
59 * o 64 entry CAM filter
60 * o 16K RX FIFO and 48K TX FIFO memory
61 * o Interrupt moderation
62 *
63 * The VT6122 supports up to four transmit DMA queues. The descriptors
64 * in the transmit ring can address up to 7 data fragments; frames which
65 * span more than 7 data buffers must be coalesced, but in general the
66 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
67 * long. The receive descriptors address only a single buffer.
68 *
69 * There are two peculiar design issues with the VT6122. One is that
70 * receive data buffers must be aligned on a 32-bit boundary. This is
71 * not a problem where the VT6122 is used as a LOM device in x86-based
72 * systems, but on architectures that generate unaligned access traps, we
73 * have to do some copying.
74 *
75 * The other issue has to do with the way 64-bit addresses are handled.
76 * The DMA descriptors only allow you to specify 48 bits of addressing
77 * information. The remaining 16 bits are specified using one of the
78 * I/O registers. If you only have a 32-bit system, then this isn't
79 * an issue, but if you have a 64-bit system and more than 4GB of
80 * memory, you have to make sure your network data buffers reside
81 * in the same 48-bit 'segment.'
82 *
83 * Special thanks to Ryan Fu at VIA Networking for providing documentation
84 * and sample NICs for testing.
85 */
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/if_ether.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <machine/bus.h>
107
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_vgereg.h>
116 #include <dev/pci/if_vgevar.h>
117
118 static int vge_probe(struct device *, struct cfdata *, void *);
119 static void vge_attach(struct device *, struct device *, void *);
120
121 static int vge_encap(struct vge_softc *, struct mbuf *, int);
122
123 static int vge_allocmem(struct vge_softc *);
124 static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
125 static int vge_rx_list_init(struct vge_softc *);
126 static int vge_tx_list_init(struct vge_softc *);
127 #ifndef __NO_STRICT_ALIGNMENT
128 static inline void vge_fixup_rx(struct mbuf *);
129 #endif
130 static void vge_rxeof(struct vge_softc *);
131 static void vge_txeof(struct vge_softc *);
132 static int vge_intr(void *);
133 static void vge_tick(void *);
134 static void vge_start(struct ifnet *);
135 static int vge_ioctl(struct ifnet *, u_long, caddr_t);
136 static int vge_init(struct ifnet *);
137 static void vge_stop(struct vge_softc *);
138 static void vge_watchdog(struct ifnet *);
139 #if VGE_POWER_MANAGEMENT
140 static int vge_suspend(struct device *);
141 static int vge_resume(struct device *);
142 #endif
143 static void vge_shutdown(void *);
144 static int vge_ifmedia_upd(struct ifnet *);
145 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
146
147 static uint16_t vge_read_eeprom(struct vge_softc *, int);
148
149 static void vge_miipoll_start(struct vge_softc *);
150 static void vge_miipoll_stop(struct vge_softc *);
151 static int vge_miibus_readreg(struct device *, int, int);
152 static void vge_miibus_writereg(struct device *, int, int, int);
153 static void vge_miibus_statchg(struct device *);
154
155 static void vge_cam_clear(struct vge_softc *);
156 static int vge_cam_set(struct vge_softc *, uint8_t *);
157 static void vge_setmulti(struct vge_softc *);
158 static void vge_reset(struct vge_softc *);
159
160 #define VGE_PCI_LOIO 0x10
161 #define VGE_PCI_LOMEM 0x14
162
163 CFATTACH_DECL(vge, sizeof(struct vge_softc),
164 vge_probe, vge_attach, NULL, NULL);
165
166 /*
167 * Defragment mbuf chain contents to be as linear as possible.
168 * Returns new mbuf chain on success, NULL on failure. Old mbuf
169 * chain is always freed.
170 * XXX temporary until there is a generic function doing this.
171 */
172 #define m_defrag vge_m_defrag
173 struct mbuf * vge_m_defrag(struct mbuf *, int);
174
175 struct mbuf *
176 vge_m_defrag(struct mbuf *mold, int flags)
177 {
178 struct mbuf *m0, *mn, *n;
179 size_t sz = mold->m_pkthdr.len;
180
181 #ifdef DIAGNOSTIC
182 if ((mold->m_flags & M_PKTHDR) == 0)
183 panic("m_defrag: not a mbuf chain header");
184 #endif
185
186 MGETHDR(m0, flags, MT_DATA);
187 if (m0 == NULL)
188 return NULL;
189 m0->m_pkthdr.len = mold->m_pkthdr.len;
190 mn = m0;
191
192 do {
193 if (sz > MHLEN) {
194 MCLGET(mn, M_DONTWAIT);
195 if ((mn->m_flags & M_EXT) == 0) {
196 m_freem(m0);
197 return NULL;
198 }
199 }
200
201 mn->m_len = MIN(sz, MCLBYTES);
202
203 m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
204 mtod(mn, caddr_t));
205
206 sz -= mn->m_len;
207
208 if (sz > 0) {
209 /* need more mbufs */
210 MGET(n, M_NOWAIT, MT_DATA);
211 if (n == NULL) {
212 m_freem(m0);
213 return NULL;
214 }
215
216 mn->m_next = n;
217 mn = n;
218 }
219 } while (sz > 0);
220
221 return m0;
222 }
223
224 /*
225 * Read a word of data stored in the EEPROM at address 'addr.'
226 */
227 static uint16_t
228 vge_read_eeprom(struct vge_softc *sc, int addr)
229 {
230 int i;
231 uint16_t word = 0;
232
233 /*
234 * Enter EEPROM embedded programming mode. In order to
235 * access the EEPROM at all, we first have to set the
236 * EELOAD bit in the CHIPCFG2 register.
237 */
238 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
239 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
240
241 /* Select the address of the word we want to read */
242 CSR_WRITE_1(sc, VGE_EEADDR, addr);
243
244 /* Issue read command */
245 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
246
247 /* Wait for the done bit to be set. */
248 for (i = 0; i < VGE_TIMEOUT; i++) {
249 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
250 break;
251 }
252
253 if (i == VGE_TIMEOUT) {
254 printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
255 return 0;
256 }
257
258 /* Read the result */
259 word = CSR_READ_2(sc, VGE_EERDDAT);
260
261 /* Turn off EEPROM access mode. */
262 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
263 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
264
265 return word;
266 }
267
268 static void
269 vge_miipoll_stop(struct vge_softc *sc)
270 {
271 int i;
272
273 CSR_WRITE_1(sc, VGE_MIICMD, 0);
274
275 for (i = 0; i < VGE_TIMEOUT; i++) {
276 DELAY(1);
277 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
278 break;
279 }
280
281 if (i == VGE_TIMEOUT) {
282 printf("%s: failed to idle MII autopoll\n",
283 sc->sc_dev.dv_xname);
284 }
285 }
286
287 static void
288 vge_miipoll_start(struct vge_softc *sc)
289 {
290 int i;
291
292 /* First, make sure we're idle. */
293
294 CSR_WRITE_1(sc, VGE_MIICMD, 0);
295 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
296
297 for (i = 0; i < VGE_TIMEOUT; i++) {
298 DELAY(1);
299 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
300 break;
301 }
302
303 if (i == VGE_TIMEOUT) {
304 printf("%s: failed to idle MII autopoll\n",
305 sc->sc_dev.dv_xname);
306 return;
307 }
308
309 /* Now enable auto poll mode. */
310
311 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
312
313 /* And make sure it started. */
314
315 for (i = 0; i < VGE_TIMEOUT; i++) {
316 DELAY(1);
317 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
318 break;
319 }
320
321 if (i == VGE_TIMEOUT) {
322 printf("%s: failed to start MII autopoll\n",
323 sc->sc_dev.dv_xname);
324 }
325 }
326
327 static int
328 vge_miibus_readreg(struct device *dev, int phy, int reg)
329 {
330 struct vge_softc *sc;
331 int i;
332 uint16_t rval;
333
334 sc = (void *)dev;
335 rval = 0;
336 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
337 return 0;
338
339 VGE_LOCK(sc);
340 vge_miipoll_stop(sc);
341
342 /* Specify the register we want to read. */
343 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
344
345 /* Issue read command. */
346 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
347
348 /* Wait for the read command bit to self-clear. */
349 for (i = 0; i < VGE_TIMEOUT; i++) {
350 DELAY(1);
351 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
352 break;
353 }
354
355 if (i == VGE_TIMEOUT)
356 printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
357 else
358 rval = CSR_READ_2(sc, VGE_MIIDATA);
359
360 vge_miipoll_start(sc);
361 VGE_UNLOCK(sc);
362
363 return rval;
364 }
365
366 static void
367 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
368 {
369 struct vge_softc *sc;
370 int i;
371
372 sc = (void *)dev;
373 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
374 return;
375
376 VGE_LOCK(sc);
377 vge_miipoll_stop(sc);
378
379 /* Specify the register we want to write. */
380 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
381
382 /* Specify the data we want to write. */
383 CSR_WRITE_2(sc, VGE_MIIDATA, data);
384
385 /* Issue write command. */
386 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
387
388 /* Wait for the write command bit to self-clear. */
389 for (i = 0; i < VGE_TIMEOUT; i++) {
390 DELAY(1);
391 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
392 break;
393 }
394
395 if (i == VGE_TIMEOUT) {
396 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
397 }
398
399 vge_miipoll_start(sc);
400 VGE_UNLOCK(sc);
401 }
402
403 static void
404 vge_cam_clear(struct vge_softc *sc)
405 {
406 int i;
407
408 /*
409 * Turn off all the mask bits. This tells the chip
410 * that none of the entries in the CAM filter are valid.
411 * Desired entries will be enabled as we fill the filter in.
412 */
413
414 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
415 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
416 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
417 for (i = 0; i < 8; i++)
418 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
419
420 /* Clear the VLAN filter too. */
421
422 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
423 for (i = 0; i < 8; i++)
424 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
425
426 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
427 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
428 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
429
430 sc->vge_camidx = 0;
431 }
432
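/*
 * Program a single CAM (perfect filter) entry with the given station
 * address and set the corresponding mask bit so the chip will match it.
 */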
433 static int
434 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
435 {
436 int i, error;
437
438 error = 0;
439
440 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
441 return ENOSPC;
442
443 /* Select the CAM data page. */
444 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
445 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
446
447 /* Set the filter entry we want to update and enable writing. */
448 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
449
450 /* Write the address to the CAM registers */
451 for (i = 0; i < ETHER_ADDR_LEN; i++)
452 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
453
454 /* Issue a write command. */
455 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
456
457 /* Wait for it to clear. */
458 for (i = 0; i < VGE_TIMEOUT; i++) {
459 DELAY(1);
460 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
461 break;
462 }
463
464 if (i == VGE_TIMEOUT) {
465 printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
466 error = EIO;
467 goto fail;
468 }
469
470 /* Select the CAM mask page. */
471 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
472 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
473
474 /* Set the mask bit that enables this filter. */
475 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx / 8),
476 1 << (sc->vge_camidx & 7));
477
478 sc->vge_camidx++;
479
480 fail:
481 /* Turn off access to CAM. */
482 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
483 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
484 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
485
486 return error;
487 }
488
489 /*
490 * Program the multicast filter. We use the 64-entry CAM filter
491 * for perfect filtering. If there are more than 64 multicast addresses,
492 * we use the hash filter instead.
493 */
494 static void
495 vge_setmulti(struct vge_softc *sc)
496 {
497 struct ifnet *ifp;
498 int error;
499 uint32_t h, hashes[2] = { 0, 0 };
500 struct ether_multi *enm;
501 struct ether_multistep step;
502
503 error = 0;
504 ifp = &sc->sc_ethercom.ec_if;
505
506 /* First, zot all the multicast entries. */
507 vge_cam_clear(sc);
508 CSR_WRITE_4(sc, VGE_MAR0, 0);
509 CSR_WRITE_4(sc, VGE_MAR1, 0);
510 ifp->if_flags &= ~IFF_ALLMULTI;
511
512 /*
513 * If the user wants allmulti or promisc mode, enable reception
514 * of all multicast frames.
515 */
516 if (ifp->if_flags & IFF_PROMISC) {
517 allmulti:
518 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
519 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
520 ifp->if_flags |= IFF_ALLMULTI;
521 return;
522 }
523
524 /* Now program new ones */
525 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
526 while (enm != NULL) {
527 /*
528 * If multicast range, fall back to ALLMULTI.
529 */
530 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
531 ETHER_ADDR_LEN) != 0)
532 goto allmulti;
533
534 error = vge_cam_set(sc, enm->enm_addrlo);
535 if (error)
536 break;
537
538 ETHER_NEXT_MULTI(step, enm);
539 }
540
541 /* If there were too many addresses, use the hash filter. */
542 if (error) {
543 vge_cam_clear(sc);
544
545 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
546 while (enm != NULL) {
547 /*
548 * If multicast range, fall back to ALLMULTI.
549 */
550 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
551 ETHER_ADDR_LEN) != 0)
552 goto allmulti;
553
554 h = ether_crc32_be(enm->enm_addrlo,
555 ETHER_ADDR_LEN) >> 26;
556 hashes[h >> 5] |= 1 << (h & 0x1f);
557
558 ETHER_NEXT_MULTI(step, enm);
559 }
560
561 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
562 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
563 }
564 }
565
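/*
 * Soft-reset the chip and reload its configuration from the EEPROM.
 * If the soft reset does not complete, force the chip to stop first.
 */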
566 static void
567 vge_reset(struct vge_softc *sc)
568 {
569 int i;
570
571 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
572
573 for (i = 0; i < VGE_TIMEOUT; i++) {
574 DELAY(5);
575 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
576 break;
577 }
578
579 if (i == VGE_TIMEOUT) {
580 printf("%s: soft reset timed out\n", sc->sc_dev.dv_xname);
581 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
582 DELAY(2000);
583 }
584
585 DELAY(5000);
586
587 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
588
589 for (i = 0; i < VGE_TIMEOUT; i++) {
590 DELAY(5);
591 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
592 break;
593 }
594
595 if (i == VGE_TIMEOUT) {
596 printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
597 return;
598 }
599
600 /*
601 * On some machines the first word read from the EEPROM after a
602 * reload can be garbage, so do one dummy read here to discard it.
603 */
604 (void)vge_read_eeprom(sc, 0);
605
606 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
607 }
608
609 /*
610 * Probe for a VIA gigabit chip. Check the PCI vendor and device
611 * IDs and return nonzero if we find a match.
612 */
613 static int
614 vge_probe(struct device *parent __unused, struct cfdata *match __unused,
615 void *aux)
616 {
617 struct pci_attach_args *pa = aux;
618
619 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
620 && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
621 return 1;
622
623 return 0;
624 }
625
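/*
 * Allocate and map DMA-safe memory for the TX and RX descriptor rings
 * and create the DMA maps used for the individual packet buffers.
 */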
626 static int
627 vge_allocmem(struct vge_softc *sc)
628 {
629 int error;
630 int nseg;
631 int i;
632 bus_dma_segment_t seg;
633
634 /*
635 * Allocate map for TX descriptor list.
636 */
637 error = bus_dmamap_create(sc->vge_dmat,
638 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT,
639 &sc->vge_ldata.vge_tx_list_map);
640 if (error) {
641 printf("%s: could not allocate TX dma list map\n",
642 sc->sc_dev.dv_xname);
643 return ENOMEM;
644 }
645
646 /*
647 * Allocate memory for TX descriptor list.
648 */
649
650 error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
651 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
652 if (error) {
653 printf("%s: could not allocate TX ring dma memory\n",
654 sc->sc_dev.dv_xname);
655 return ENOMEM;
656 }
657
658 /* Map the memory to kernel VA space */
659
660 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_TX_LIST_SZ,
661 (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
662 if (error) {
663 printf("%s: could not map TX ring dma memory\n",
664 sc->sc_dev.dv_xname);
665 return ENOMEM;
666 }
667
668 /* Load the map for the TX ring. */
669 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
670 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
671 if (error) {
672 printf("%s: could not load TX ring dma memory\n",
673 sc->sc_dev.dv_xname);
674 return ENOMEM;
675 }
676
677 /* Create DMA maps for TX buffers */
678
679 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
680 error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
681 VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
682 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
683 &sc->vge_ldata.vge_tx_dmamap[i]);
684 if (error) {
685 printf("%s: can't create DMA map for TX\n",
686 sc->sc_dev.dv_xname);
687 return ENOMEM;
688 }
689 }
690
691 /*
692 * Allocate map for RX descriptor list.
693 */
694 error = bus_dmamap_create(sc->vge_dmat,
695 VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT,
696 &sc->vge_ldata.vge_rx_list_map);
697 if (error) {
698 printf("%s: could not allocate RX dma list map\n",
699 sc->sc_dev.dv_xname);
700 return ENOMEM;
701 }
702
703 /* Allocate DMA'able memory for the RX ring */
704
705 error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
706 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
707 if (error)
708 return ENOMEM;
709
710 /* Map the memory to kernel VA space */
711
712 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_RX_LIST_SZ,
713 (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
714 if (error)
715 return ENOMEM;
716
717 /* Load the map for the RX ring. */
718 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
719 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
720 if (error) {
721 printf("%s: could not load RX ring dma memory\n",
722 sc->sc_dev.dv_xname);
723 return ENOMEM;
724 }
725
726 /* Create DMA maps for RX buffers */
727
728 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
729 error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
730 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
731 &sc->vge_ldata.vge_rx_dmamap[i]);
732 if (error) {
733 printf("%s: can't create DMA map for RX\n",
734 sc->sc_dev.dv_xname);
735 return ENOMEM;
736 }
737 }
738
739 return 0;
740 }
741
742 /*
743 * Attach the interface. Allocate softc structures, do ifmedia
744 * setup and ethernet/BPF attach.
745 */
746 static void
747 vge_attach(struct device *parent __unused, struct device *self, void *aux)
748 {
749 uint8_t *eaddr;
750 struct vge_softc *sc = (struct vge_softc *)self;
751 struct ifnet *ifp;
752 struct pci_attach_args *pa = aux;
753 pci_chipset_tag_t pc = pa->pa_pc;
754 const char *intrstr;
755 pci_intr_handle_t ih;
756 uint16_t val;
757
758 aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
759 PCI_REVISION(pa->pa_class));
760
761 /* Make sure bus-mastering is enabled */
762 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
763 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
764 PCI_COMMAND_MASTER_ENABLE);
765
766 /*
767 * Map control/status registers.
768 */
769 if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
770 &sc->vge_btag, &sc->vge_bhandle, NULL, NULL) != 0) {
771 aprint_error("%s: couldn't map memory\n", sc->sc_dev.dv_xname);
772 return;
773 }
774
775 /*
776 * Map and establish our interrupt.
777 */
778 if (pci_intr_map(pa, &ih)) {
779 aprint_error("%s: unable to map interrupt\n",
780 sc->sc_dev.dv_xname);
781 return;
782 }
783 intrstr = pci_intr_string(pc, ih);
784 sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
785 if (sc->vge_intrhand == NULL) {
786 printf("%s: unable to establish interrupt",
787 sc->sc_dev.dv_xname);
788 if (intrstr != NULL)
789 printf(" at %s", intrstr);
790 printf("\n");
791 return;
792 }
793 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
794
795 /* Reset the adapter. */
796 vge_reset(sc);
797
798 /*
799 * Get station address from the EEPROM.
800 */
801 eaddr = sc->vge_eaddr;
802 val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
803 eaddr[0] = val & 0xff;
804 eaddr[1] = val >> 8;
805 val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
806 eaddr[2] = val & 0xff;
807 eaddr[3] = val >> 8;
808 val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
809 eaddr[4] = val & 0xff;
810 eaddr[5] = val >> 8;
811
812 printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
813 ether_sprintf(eaddr));
814
815 /*
816 * Use the 32bit tag. Hardware supports 48bit physical addresses,
817 * but we don't use that for now.
818 */
819 sc->vge_dmat = pa->pa_dmat;
820
821 if (vge_allocmem(sc))
822 return;
823
824 ifp = &sc->sc_ethercom.ec_if;
825 ifp->if_softc = sc;
826 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
827 ifp->if_mtu = ETHERMTU;
828 ifp->if_baudrate = IF_Gbps(1);
829 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
830 ifp->if_ioctl = vge_ioctl;
831 ifp->if_start = vge_start;
832
833 /*
834 * We can support 802.1Q VLAN-sized frames and jumbo
835 * Ethernet frames.
836 */
837 sc->sc_ethercom.ec_capabilities |=
838 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
839 ETHERCAP_VLAN_HWTAGGING;
840
841 /*
842 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
843 */
844 ifp->if_capabilities |=
845 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
846 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
847 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
848
849 #ifdef DEVICE_POLLING
850 #ifdef IFCAP_POLLING
851 ifp->if_capabilities |= IFCAP_POLLING;
852 #endif
853 #endif
854 ifp->if_watchdog = vge_watchdog;
855 ifp->if_init = vge_init;
856 IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
857
858 /*
859 * Initialize our media structures and probe the MII.
860 */
861 sc->sc_mii.mii_ifp = ifp;
862 sc->sc_mii.mii_readreg = vge_miibus_readreg;
863 sc->sc_mii.mii_writereg = vge_miibus_writereg;
864 sc->sc_mii.mii_statchg = vge_miibus_statchg;
865 ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
866 vge_ifmedia_sts);
867 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
868 MII_OFFSET_ANY, MIIF_DOPAUSE);
869 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
870 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
871 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
872 } else
873 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
874
875 /*
876 * Attach the interface.
877 */
878 if_attach(ifp);
879 ether_ifattach(ifp, eaddr);
880
881 callout_init(&sc->vge_timeout);
882 callout_setfunc(&sc->vge_timeout, vge_tick, sc);
883
884 /*
885 * Make sure the interface is shutdown during reboot.
886 */
887 if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
888 printf("%s: WARNING: unable to establish shutdown hook\n",
889 sc->sc_dev.dv_xname);
890 }
891 }
892
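/*
 * Attach an mbuf cluster to the RX descriptor at 'idx'.  A new cluster
 * is allocated if 'm' is NULL, otherwise the caller's mbuf is recycled.
 * Ownership of descriptors is handed back to the chip in groups of
 * VGE_RXCHUNK, as required by the hardware (see below).
 */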
893 static int
894 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
895 {
896 struct vge_rx_desc *d;
897 struct mbuf *m_new;
898 bus_dmamap_t map;
899 int i;
900
901 m_new = NULL;
902 if (m == NULL) {
903 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
904 if (m_new == NULL)
905 return ENOBUFS;
906
907 MCLGET(m_new, M_DONTWAIT);
908 if ((m_new->m_flags & M_EXT) == 0) {
909 m_freem(m_new);
910 return ENOBUFS;
911 }
912
913 m = m_new;
914 } else
915 m->m_data = m->m_ext.ext_buf;
916
917
918 #ifndef __NO_STRICT_ALIGNMENT
919 /*
920 * This is part of an evil trick to deal with non-x86 platforms.
921 * The VIA chip requires RX buffers to be aligned on 32-bit
922 * boundaries, but that will hose non-x86 machines. To get around
923 * this, we leave some empty space at the start of each buffer
924 * and for non-x86 hosts, we copy the buffer back two bytes
925 * to achieve word alignment. This is slightly more efficient
926 * than allocating a new buffer, copying the contents, and
927 * discarding the old buffer.
928 */
929 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
930 m->m_data += VGE_ETHER_ALIGN;
931 #else
932 m->m_len = m->m_pkthdr.len = MCLBYTES;
933 #endif
934 map = sc->vge_ldata.vge_rx_dmamap[idx];
935
936 if (bus_dmamap_load_mbuf(sc->vge_dmat, map, m, BUS_DMA_NOWAIT) != 0)
937 goto out;
938
939 d = &sc->vge_ldata.vge_rx_list[idx];
940
941 /* If this descriptor is still owned by the chip, bail. */
942
943 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
944 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
945 printf("%s: tried to map busy descriptor\n",
946 sc->sc_dev.dv_xname);
947 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
948 goto out;
949 }
950
951 d->vge_buflen =
952 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
953 d->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
954 d->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
955 d->vge_sts = 0;
956 d->vge_ctl = 0;
957 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
958
959 bus_dmamap_sync(sc->vge_dmat,
960 sc->vge_ldata.vge_rx_dmamap[idx],
961 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
962 BUS_DMASYNC_PREREAD);
963
964 /*
965 * Note: the manual fails to document the fact that for
966 * proper operation, the driver needs to replenish the RX
967 * DMA ring 4 descriptors at a time (rather than one at a
968 * time, like most chips). We can allocate the new buffers
969 * but we should not set the OWN bits until we're ready
970 * to hand back 4 of them in one shot.
971 */
972
973 #define VGE_RXCHUNK 4
974 sc->vge_rx_consumed++;
975 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
976 for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
977 sc->vge_ldata.vge_rx_list[i].vge_sts |=
978 htole32(VGE_RDSTS_OWN);
979 VGE_RXDESCSYNC(sc, i,
980 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
981 }
982 sc->vge_rx_consumed = 0;
983 }
984
985 sc->vge_ldata.vge_rx_mbuf[idx] = m;
986
987 return 0;
988 out:
989 if (m_new != NULL)
990 m_freem(m_new);
991 return ENOMEM;
992 }
993
994 static int
995 vge_tx_list_init(struct vge_softc *sc)
996 {
997
998 memset((char *)sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
999 bus_dmamap_sync(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
1000 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
1001 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1002
1003 memset((char *)&sc->vge_ldata.vge_tx_mbuf, 0,
1004 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1005
1006 sc->vge_ldata.vge_tx_prodidx = 0;
1007 sc->vge_ldata.vge_tx_considx = 0;
1008 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1009
1010 return 0;
1011 }
1012
1013 static int
1014 vge_rx_list_init(struct vge_softc *sc)
1015 {
1016 int i;
1017
1018 memset((char *)sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
1019 memset((char *)&sc->vge_ldata.vge_rx_mbuf, 0,
1020 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1021
1022 sc->vge_rx_consumed = 0;
1023
1024 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1025 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1026 return (ENOBUFS);
1027 }
1028
1029 sc->vge_ldata.vge_rx_prodidx = 0;
1030 sc->vge_rx_consumed = 0;
1031 sc->vge_head = sc->vge_tail = NULL;
1032
1033 return 0;
1034 }
1035
1036 #ifndef __NO_STRICT_ALIGNMENT
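/*
 * On strict-alignment hosts vge_newbuf() loads receive buffers at an
 * offset of VGE_ETHER_ALIGN; this routine copies the received frame
 * back two bytes so that its headers end up properly aligned again.
 */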
1037 static inline void
1038 vge_fixup_rx(struct mbuf *m)
1039 {
1040 int i;
1041 uint16_t *src, *dst;
1042
1043 src = mtod(m, uint16_t *);
1044 dst = src - 1;
1045
1046 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1047 *dst++ = *src++;
1048
1049 m->m_data -= ETHER_ALIGN;
1050 }
1051 #endif
1052
1053 /*
1054 * RX handler. We support the reception of jumbo frames that have
1055 * been fragmented across multiple 2K mbuf cluster buffers.
1056 */
1057 static void
1058 vge_rxeof(struct vge_softc *sc)
1059 {
1060 struct mbuf *m;
1061 struct ifnet *ifp;
1062 int idx, total_len, lim;
1063 struct vge_rx_desc *cur_rx;
1064 uint32_t rxstat, rxctl;
1065
1066 VGE_LOCK_ASSERT(sc);
1067 ifp = &sc->sc_ethercom.ec_if;
1068 idx = sc->vge_ldata.vge_rx_prodidx;
1069 lim = 0;
1070
1071 /* Invalidate the descriptor memory */
1072
1073 for (;;) {
1074 cur_rx = &sc->vge_ldata.vge_rx_list[idx];
1075
1076 VGE_RXDESCSYNC(sc, idx,
1077 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1078 rxstat = le32toh(cur_rx->vge_sts);
1079 if ((rxstat & VGE_RDSTS_OWN) != 0) {
1080 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1081 break;
1082 }
1083
1084 #ifdef DEVICE_POLLING
1085 if (ifp->if_flags & IFF_POLLING) {
1086 if (sc->rxcycles <= 0)
1087 break;
1088 sc->rxcycles--;
1089 }
1090 #endif /* DEVICE_POLLING */
1091
1092 m = sc->vge_ldata.vge_rx_mbuf[idx];
1093 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1094 rxctl = le32toh(cur_rx->vge_ctl);
1095
1096 /* Invalidate the RX mbuf and unload its map */
1097
1098 bus_dmamap_sync(sc->vge_dmat,
1099 sc->vge_ldata.vge_rx_dmamap[idx],
1100 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
1101 BUS_DMASYNC_POSTREAD);
1102 bus_dmamap_unload(sc->vge_dmat,
1103 sc->vge_ldata.vge_rx_dmamap[idx]);
1104
1105 /*
1106 * If the 'start of frame' bit is set, this indicates
1107 * either the first fragment in a multi-fragment receive,
1108 * or an intermediate fragment. Either way, we want to
1109 * accumulate the buffers.
1110 */
1111 if (rxstat & VGE_RXPKT_SOF) {
1112 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1113 if (sc->vge_head == NULL)
1114 sc->vge_head = sc->vge_tail = m;
1115 else {
1116 m->m_flags &= ~M_PKTHDR;
1117 sc->vge_tail->m_next = m;
1118 sc->vge_tail = m;
1119 }
1120 vge_newbuf(sc, idx, NULL);
1121 VGE_RX_DESC_INC(idx);
1122 continue;
1123 }
1124
1125 /*
1126 * Bad/error frames will have the RXOK bit cleared.
1127 * However, there's one error case we want to allow:
1128 * if a VLAN tagged frame arrives and the chip can't
1129 * match it against the CAM filter, it considers this
1130 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1131 * We don't want to drop the frame though: our VLAN
1132 * filtering is done in software.
1133 */
1134 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1135 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1136 ifp->if_ierrors++;
1137 /*
1138 * If this is part of a multi-fragment packet,
1139 * discard all the pieces.
1140 */
1141 if (sc->vge_head != NULL) {
1142 m_freem(sc->vge_head);
1143 sc->vge_head = sc->vge_tail = NULL;
1144 }
1145 vge_newbuf(sc, idx, m);
1146 VGE_RX_DESC_INC(idx);
1147 continue;
1148 }
1149
1150 /*
1151 * If allocating a replacement mbuf fails,
1152 * reload the current one.
1153 */
1154
1155 if (vge_newbuf(sc, idx, NULL)) {
1156 ifp->if_ierrors++;
1157 if (sc->vge_head != NULL) {
1158 m_freem(sc->vge_head);
1159 sc->vge_head = sc->vge_tail = NULL;
1160 }
1161 vge_newbuf(sc, idx, m);
1162 VGE_RX_DESC_INC(idx);
1163 continue;
1164 }
1165
1166 VGE_RX_DESC_INC(idx);
1167
1168 if (sc->vge_head != NULL) {
1169 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1170 /*
1171 * Special case: if there's 4 bytes or less
1172 * in this buffer, the mbuf can be discarded:
1173 * the last 4 bytes is the CRC, which we don't
1174 * care about anyway.
1175 */
1176 if (m->m_len <= ETHER_CRC_LEN) {
1177 sc->vge_tail->m_len -=
1178 (ETHER_CRC_LEN - m->m_len);
1179 m_freem(m);
1180 } else {
1181 m->m_len -= ETHER_CRC_LEN;
1182 m->m_flags &= ~M_PKTHDR;
1183 sc->vge_tail->m_next = m;
1184 }
1185 m = sc->vge_head;
1186 sc->vge_head = sc->vge_tail = NULL;
1187 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1188 } else
1189 m->m_pkthdr.len = m->m_len =
1190 (total_len - ETHER_CRC_LEN);
1191
1192 #ifndef __NO_STRICT_ALIGNMENT
1193 vge_fixup_rx(m);
1194 #endif
1195 ifp->if_ipackets++;
1196 m->m_pkthdr.rcvif = ifp;
1197
1198 /* Do RX checksumming if enabled */
1199 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1200
1201 /* Check IP header checksum */
1202 if (rxctl & VGE_RDCTL_IPPKT)
1203 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1204 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1205 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1206 }
1207
1208 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1209 /* Check TCP checksum */
1210 if (rxctl & VGE_RDCTL_TCPPKT)
1211 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1212
1213 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1214 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1215 }
1216
1217 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1218 /* Check UDP checksum */
1219 if (rxctl & VGE_RDCTL_UDPPKT)
1220 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1221
1222 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1223 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1224 }
1225
1226 if (rxstat & VGE_RDSTS_VTAG)
1227 VLAN_INPUT_TAG(ifp, m,
1228 ntohs((rxctl & VGE_RDCTL_VLANID)), continue);
1229
1230 #if NBPFILTER > 0
1231 /*
1232 * Handle BPF listeners.
1233 */
1234 if (ifp->if_bpf)
1235 bpf_mtap(ifp->if_bpf, m);
1236 #endif
1237
1238 VGE_UNLOCK(sc);
1239 (*ifp->if_input)(ifp, m);
1240 VGE_LOCK(sc);
1241
1242 lim++;
1243 if (lim == VGE_RX_DESC_CNT)
1244 break;
1245
1246 }
1247
1248 sc->vge_ldata.vge_rx_prodidx = idx;
1249 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1250 }
1251
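/*
 * Transmit completion handler: walk the TX ring from the consumer
 * index, unload and free mbufs for descriptors the chip has finished
 * with, and update the free descriptor count.
 */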
1252 static void
1253 vge_txeof(struct vge_softc *sc)
1254 {
1255 struct ifnet *ifp;
1256 uint32_t txstat;
1257 int idx;
1258
1259 ifp = &sc->sc_ethercom.ec_if;
1260 idx = sc->vge_ldata.vge_tx_considx;
1261
1262 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1263 VGE_TXDESCSYNC(sc, idx,
1264 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1265
1266 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1267 if (txstat & VGE_TDSTS_OWN) {
1268 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1269 break;
1270 }
1271
1272 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1273 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1274 bus_dmamap_unload(sc->vge_dmat,
1275 sc->vge_ldata.vge_tx_dmamap[idx]);
1276 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1277 ifp->if_collisions++;
1278 if (txstat & VGE_TDSTS_TXERR)
1279 ifp->if_oerrors++;
1280 else
1281 ifp->if_opackets++;
1282
1283 sc->vge_ldata.vge_tx_free++;
1284 VGE_TX_DESC_INC(idx);
1285 }
1286
1287 /* No changes made to the TX ring, so no flush needed */
1288
1289 if (idx != sc->vge_ldata.vge_tx_considx) {
1290 sc->vge_ldata.vge_tx_considx = idx;
1291 ifp->if_flags &= ~IFF_OACTIVE;
1292 ifp->if_timer = 0;
1293 }
1294
1295 /*
1296 * If not all descriptors have been reaped yet,
1297 * reload the timer so that we will eventually get another
1298 * interrupt that will cause us to re-enter this routine.
1299 * This is done in case the transmitter has gone idle.
1300 */
1301 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1302 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1303 }
1304 }
1305
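/*
 * One-second timer: drive the MII state machine, track link state
 * transitions and restart transmission when the link comes back up.
 */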
1306 static void
1307 vge_tick(void *xsc)
1308 {
1309 struct vge_softc *sc;
1310 struct ifnet *ifp;
1311 struct mii_data *mii;
1312 int s;
1313
1314 sc = xsc;
1315 ifp = &sc->sc_ethercom.ec_if;
1316 mii = &sc->sc_mii;
1317
1318 s = splnet();
1319
1320 VGE_LOCK(sc);
1321
1322 callout_schedule(&sc->vge_timeout, hz);
1323
1324 mii_tick(mii);
1325 if (sc->vge_link) {
1326 if (!(mii->mii_media_status & IFM_ACTIVE))
1327 sc->vge_link = 0;
1328 } else {
1329 if (mii->mii_media_status & IFM_ACTIVE &&
1330 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1331 sc->vge_link = 1;
1332 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1333 vge_start(ifp);
1334 }
1335 }
1336
1337 VGE_UNLOCK(sc);
1338
1339 splx(s);
1340 }
1341
1342 #ifdef DEVICE_POLLING
1343 static void
1344 vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1345 {
1346 struct vge_softc *sc = ifp->if_softc;
1347
1348 VGE_LOCK(sc);
1349 #ifdef IFCAP_POLLING
1350 if (!(ifp->if_capenable & IFCAP_POLLING)) {
1351 ether_poll_deregister(ifp);
1352 cmd = POLL_DEREGISTER;
1353 }
1354 #endif
1355 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1356 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1357 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
1358 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1359 goto done;
1360 }
1361
1362 sc->rxcycles = count;
1363 vge_rxeof(sc);
1364 vge_txeof(sc);
1365
1366 #if __FreeBSD_version < 502114
1367 if (ifp->if_snd.ifq_head != NULL)
1368 #else
1369 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1370 #endif
1371 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1372
1373 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1374 uint32_t status;
1375 status = CSR_READ_4(sc, VGE_ISR);
1376 if (status == 0xFFFFFFFF)
1377 goto done;
1378 if (status)
1379 CSR_WRITE_4(sc, VGE_ISR, status);
1380
1381 /*
1382 * XXX check behaviour on receiver stalls.
1383 */
1384
1385 if (status & VGE_ISR_TXDMA_STALL ||
1386 status & VGE_ISR_RXDMA_STALL)
1387 vge_init(sc);
1388
1389 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1390 vge_rxeof(sc);
1391 ifp->if_ierrors++;
1392 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1393 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1394 }
1395 }
1396 done:
1397 VGE_UNLOCK(sc);
1398 }
1399 #endif /* DEVICE_POLLING */
1400
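/*
 * Interrupt handler: acknowledge and service all pending interrupt
 * sources, then restart transmission if packets are still queued.
 */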
1401 static int
1402 vge_intr(void *arg)
1403 {
1404 struct vge_softc *sc;
1405 struct ifnet *ifp;
1406 uint32_t status;
1407 int claim;
1408
1409 sc = arg;
1410 claim = 0;
1411 if (sc->suspended) {
1412 return claim;
1413 }
1414
1415 ifp = &sc->sc_ethercom.ec_if;
1416
1417 VGE_LOCK(sc);
1418
1419 if (!(ifp->if_flags & IFF_UP)) {
1420 VGE_UNLOCK(sc);
1421 return claim;
1422 }
1423
1424 #ifdef DEVICE_POLLING
1425 if (ifp->if_flags & IFF_POLLING)
1426 goto done;
1427 if (
1428 #ifdef IFCAP_POLLING
1429 (ifp->if_capenable & IFCAP_POLLING) &&
1430 #endif
1431 ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
1432 CSR_WRITE_4(sc, VGE_IMR, 0);
1433 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1434 vge_poll(ifp, 0, 1);
1435 goto done;
1436 }
1437
1438 #endif /* DEVICE_POLLING */
1439
1440 /* Disable interrupts */
1441 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1442
1443 for (;;) {
1444
1445 status = CSR_READ_4(sc, VGE_ISR);
1446 /* If the card has gone away the read returns 0xffffffff. */
1447 if (status == 0xFFFFFFFF)
1448 break;
1449
1450 if (status) {
1451 claim = 1;
1452 CSR_WRITE_4(sc, VGE_ISR, status);
1453 }
1454
1455 if ((status & VGE_INTRS) == 0)
1456 break;
1457
1458 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1459 vge_rxeof(sc);
1460
1461 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1462 vge_rxeof(sc);
1463 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1464 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1465 }
1466
1467 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1468 vge_txeof(sc);
1469
1470 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1471 vge_init(ifp);
1472
1473 if (status & VGE_ISR_LINKSTS)
1474 vge_tick(sc);
1475 }
1476
1477 /* Re-enable interrupts */
1478 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1479
1480 #ifdef DEVICE_POLLING
1481 done:
1482 #endif
1483 VGE_UNLOCK(sc);
1484
1485 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1486 vge_start(ifp);
1487
1488 return claim;
1489 }
1490
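/*
 * Map an outgoing mbuf chain into the TX descriptor at 'idx'.  Chains
 * with too many fragments are defragmented first, and frames shorter
 * than VGE_MIN_FRAMELEN get an extra pad fragment since the chip does
 * not pad short frames itself.
 */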
1491 static int
1492 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1493 {
1494 struct vge_tx_desc *d;
1495 struct vge_tx_frag *f;
1496 struct mbuf *m_new;
1497 bus_dmamap_t map;
1498 int seg, error, flags;
1499 struct m_tag *mtag;
1500 size_t sz;
1501
1502 d = &sc->vge_ldata.vge_tx_list[idx];
1503
1504 /* If this descriptor is still owned by the chip, bail. */
1505 if (sc->vge_ldata.vge_tx_free <= 2) {
1506 VGE_TXDESCSYNC(sc, idx,
1507 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1508 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
1509 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1510 return ENOBUFS;
1511 }
1512 }
1513
1514 map = sc->vge_ldata.vge_tx_dmamap[idx];
1515 error = bus_dmamap_load_mbuf(sc->vge_dmat, map, m_head, BUS_DMA_NOWAIT);
1516
1517 /* If too many segments to map, coalesce */
1518 if (error == EFBIG) {
1519 m_new = m_defrag(m_head, M_DONTWAIT);
1520 if (m_new == NULL)
1521 return (error);
1522
1523 error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
1524 m_new, BUS_DMA_NOWAIT);
1525 if (error) {
1526 m_freem(m_new);
1527 return error;
1528 }
1529
1530 m_head = m_new;
1531 } else if (error)
1532 return error;
1533
1534 for (seg = 0, f = &d->vge_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1535 f->vge_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1536 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[seg].ds_addr));
1537 f->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[seg].ds_addr));
1538 }
1539
1540 /* Argh. This chip does not autopad short frames */
1541
1542 sz = m_head->m_pkthdr.len;
1543 if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1544 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
1545 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
1546 f->vge_addrhi =
1547 htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
1548 sz = VGE_MIN_FRAMELEN;
1549 seg++;
1550 }
1551 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1552
1553 /*
1554 * When telling the chip how many segments there are, we
1555 * must use nsegs + 1 instead of just nsegs. Darned if I
1556 * know why.
1557 */
1558 seg++;
1559
1560 flags = 0;
1561 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
1562 flags |= VGE_TDCTL_IPCSUM;
1563 if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1564 flags |= VGE_TDCTL_TCPCSUM;
1565 if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1566 flags |= VGE_TDCTL_UDPCSUM;
1567 d->vge_sts = htole32(sz << 16);
1568 d->vge_ctl = htole32(flags | (seg << 28) | VGE_TD_LS_NORM);
1569
1570 if (sz > ETHERMTU + ETHER_HDR_LEN)
1571 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
1572
1573 bus_dmamap_sync(sc->vge_dmat, map, 0, map->dm_mapsize,
1574 BUS_DMASYNC_PREWRITE);
1575
1576 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1577 sc->vge_ldata.vge_tx_free--;
1578
1579 /*
1580 * Set up hardware VLAN tagging.
1581 */
1582
1583 mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
1584 if (mtag != NULL)
1585 d->vge_ctl |=
1586 htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);
1587
1588 d->vge_sts |= htole32(VGE_TDSTS_OWN);
1589
1590 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1591
1592 return 0;
1593 }
1594
1595 /*
1596 * Main transmit routine.
1597 */
1598
1599 static void
1600 vge_start(struct ifnet *ifp)
1601 {
1602 struct vge_softc *sc;
1603 struct mbuf *m_head;
1604 int idx, pidx, error;
1605
1606 sc = ifp->if_softc;
1607 VGE_LOCK(sc);
1608
1609 if (!sc->vge_link ||
1610 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1611 VGE_UNLOCK(sc);
1612 return;
1613 }
1614
1615 m_head = NULL;
1616 idx = sc->vge_ldata.vge_tx_prodidx;
1617
1618 pidx = idx - 1;
1619 if (pidx < 0)
1620 pidx = VGE_TX_DESC_CNT - 1;
1621
1622 /*
1623 * Loop through the send queue, setting up transmit descriptors
1624 * until we drain the queue, or use up all available transmit
1625 * descriptors.
1626 */
1627 for (;;) {
1628 /* Grab a packet off the queue. */
1629 IFQ_POLL(&ifp->if_snd, m_head);
1630 if (m_head == NULL)
1631 break;
1632
1633 if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
1634 /*
1635 * Slot already used, stop for now.
1636 */
1637 ifp->if_flags |= IFF_OACTIVE;
1638 break;
1639 }
1640
1641 if ((error = vge_encap(sc, m_head, idx))) {
1642 if (error == EFBIG) {
1643 printf("%s: Tx packet consumes too many "
1644 "DMA segments, dropping...\n",
1645 sc->sc_dev.dv_xname);
1646 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1647 m_freem(m_head);
1648 continue;
1649 }
1650
1651 /*
1652 * Short on resources, just stop for now.
1653 */
1654 if (error == ENOBUFS)
1655 ifp->if_flags |= IFF_OACTIVE;
1656 break;
1657 }
1658
1659 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1660
1661 /*
1662 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1663 */
1664
1665 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1666 htole16(VGE_TXDESC_Q);
1667 VGE_TXDESCSYNC(sc, pidx,
1668 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1669
1670 if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
1671 m_freem(m_head);
1672 m_head = sc->vge_ldata.vge_tx_mbuf[idx];
1673 }
1674
1675 pidx = idx;
1676 VGE_TX_DESC_INC(idx);
1677
1678 /*
1679 * If there's a BPF listener, bounce a copy of this frame
1680 * to him.
1681 */
1682 #if NBPFILTER > 0
1683 if (ifp->if_bpf)
1684 bpf_mtap(ifp->if_bpf, m_head);
1685 #endif
1686 }
1687
1688 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1689 VGE_UNLOCK(sc);
1690 return;
1691 }
1692
1693 /* Issue a transmit command. */
1694 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1695
1696 sc->vge_ldata.vge_tx_prodidx = idx;
1697
1698 /*
1699 * Use the countdown timer for interrupt moderation.
1700 * 'TX done' interrupts are disabled. Instead, we reset the
1701 * countdown timer, which will begin counting until it hits
1702 * the value in the SSTIMER register, and then trigger an
1703 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1704 * timer count is reloaded. Only when the transmitter
1705 * is idle will the timer hit 0 and an interrupt fire.
1706 */
1707 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1708
1709 VGE_UNLOCK(sc);
1710
1711 /*
1712 * Set a timeout in case the chip goes out to lunch.
1713 */
1714 ifp->if_timer = 5;
1715 }
1716
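/*
 * Bring the interface up: reset the chip, initialize the descriptor
 * rings, program the station address and receive filter, and enable
 * the transmitter, receiver and interrupts.
 */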
1717 static int
1718 vge_init(struct ifnet *ifp)
1719 {
1720 struct vge_softc *sc;
1721 int i;
1722
1723 sc = ifp->if_softc;
1724
1725 VGE_LOCK(sc);
1726
1727 /*
1728 * Cancel pending I/O and free all RX/TX buffers.
1729 */
1730 vge_stop(sc);
1731 vge_reset(sc);
1732
1733 /*
1734 * Initialize the RX and TX descriptors and mbufs.
1735 */
1736
1737 vge_rx_list_init(sc);
1738 vge_tx_list_init(sc);
1739
1740 /* Set our station address */
1741 for (i = 0; i < ETHER_ADDR_LEN; i++)
1742 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);
1743
1744 /*
1745 * Set receive FIFO threshold. Also allow transmission and
1746 * reception of VLAN tagged frames.
1747 */
1748 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1749 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1750
1751 /* Set DMA burst length */
1752 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1753 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1754
1755 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1756
1757 /* Set collision backoff algorithm */
1758 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1759 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1760 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1761
1762 /* Disable LPSEL field in priority resolution */
1763 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1764
1765 /*
1766 * Load the addresses of the DMA queues into the chip.
1767 * Note that we only use one transmit queue.
1768 */
1769
1770 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1771 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr));
1772 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1773
1774 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1775 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr));
1776 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1777 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1778
1779 /* Enable and wake up the RX descriptor queue */
1780 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1781 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1782
1783 /* Enable the TX descriptor queue */
1784 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1785
1786 /* Set up the receive filter -- allow large frames for VLANs. */
1787 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1788
1789 /* If we want promiscuous mode, set the allframes bit. */
1790 if (ifp->if_flags & IFF_PROMISC) {
1791 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1792 }
1793
1794 /* Set capture broadcast bit to capture broadcast frames. */
1795 if (ifp->if_flags & IFF_BROADCAST) {
1796 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1797 }
1798
1799 /* Set multicast bit to capture multicast frames. */
1800 if (ifp->if_flags & IFF_MULTICAST) {
1801 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1802 }
1803
1804 /* Init the cam filter. */
1805 vge_cam_clear(sc);
1806
1807 /* Init the multicast filter. */
1808 vge_setmulti(sc);
1809
1810 /* Enable flow control */
1811
1812 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1813
1814 /* Enable jumbo frame reception (if desired) */
1815
1816 /* Start the MAC. */
1817 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1818 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1819 CSR_WRITE_1(sc, VGE_CRS0,
1820 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1821
1822 /*
1823 * Configure one-shot timer for microsecond
1824 * resolution and load it for 500 usecs.
1825 */
1826 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1827 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1828
1829 /*
1830 * Configure interrupt moderation for receive. Enable
1831 * the holdoff counter and load it, and set the RX
1832 * suppression count to the number of descriptors we
1833 * want to allow before triggering an interrupt.
1834 * The holdoff timer is in units of 20 usecs.
1835 */
1836
1837 #ifdef notyet
1838 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1839 /* Select the interrupt holdoff timer page. */
1840 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1841 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1842 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1843
1844 /* Enable use of the holdoff timer. */
1845 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1846 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1847
1848 /* Select the RX suppression threshold page. */
1849 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1850 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1851 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1852
1853 /* Restore the page select bits. */
1854 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1855 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1856 #endif
1857
1858 #ifdef DEVICE_POLLING
1859 /*
1860 * Disable interrupts if we are polling.
1861 */
1862 if (ifp->if_flags & IFF_POLLING) {
1863 CSR_WRITE_4(sc, VGE_IMR, 0);
1864 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1865 } else /* otherwise ... */
1866 #endif /* DEVICE_POLLING */
1867 {
1868 /*
1869 * Enable interrupts.
1870 */
1871 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1872 CSR_WRITE_4(sc, VGE_ISR, 0);
1873 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1874 }
1875
1876 mii_mediachg(&sc->sc_mii);
1877
1878 ifp->if_flags |= IFF_RUNNING;
1879 ifp->if_flags &= ~IFF_OACTIVE;
1880
1881 sc->vge_if_flags = 0;
1882 sc->vge_link = 0;
1883
1884 VGE_UNLOCK(sc);
1885
1886 callout_schedule(&sc->vge_timeout, hz);
1887
1888 return 0;
1889 }
1890
1891 /*
1892 * Set media options.
1893 */
1894 static int
1895 vge_ifmedia_upd(struct ifnet *ifp)
1896 {
1897 struct vge_softc *sc;
1898
1899 sc = ifp->if_softc;
1900 mii_mediachg(&sc->sc_mii);
1901
1902 return 0;
1903 }
1904
1905 /*
1906 * Report current media status.
1907 */
1908 static void
1909 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1910 {
1911 struct vge_softc *sc;
1912 struct mii_data *mii;
1913
1914 sc = ifp->if_softc;
1915 mii = &sc->sc_mii;
1916
1917 mii_pollstat(mii);
1918 ifmr->ifm_active = mii->mii_media_active;
1919 ifmr->ifm_status = mii->mii_media_status;
1920 }
1921
1922 static void
1923 vge_miibus_statchg(struct device *self)
1924 {
1925 struct vge_softc *sc;
1926 struct mii_data *mii;
1927 struct ifmedia_entry *ife;
1928
1929 sc = (void *)self;
1930 mii = &sc->sc_mii;
1931 ife = mii->mii_media.ifm_cur;
1932 /*
1933 * If the user manually selects a media mode, we need to turn
1934 * on the forced MAC mode bit in the DIAGCTL register. If the
1935 * user happens to choose a full duplex mode, we also need to
1936 * set the 'force full duplex' bit. This applies only to
1937 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1938 * mode is disabled, and in 1000baseT mode, full duplex is
1939 * always implied, so we turn on the forced mode bit but leave
1940 * the FDX bit cleared.
1941 */
1942
1943 switch (IFM_SUBTYPE(ife->ifm_media)) {
1944 case IFM_AUTO:
1945 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1946 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1947 break;
1948 case IFM_1000_T:
1949 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1950 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1951 break;
1952 case IFM_100_TX:
1953 case IFM_10_T:
1954 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1955 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
1956 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1957 } else {
1958 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1959 }
1960 break;
1961 default:
1962 printf("%s: unknown media type: %x\n",
1963 sc->sc_dev.dv_xname,
1964 IFM_SUBTYPE(ife->ifm_media));
1965 break;
1966 }
1967 }
1968
1969 static int
1970 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1971 {
1972 struct vge_softc *sc;
1973 struct ifreq *ifr;
1974 struct mii_data *mii;
1975 int s, error;
1976
1977 sc = ifp->if_softc;
1978 ifr = (struct ifreq *)data;
1979 error = 0;
1980
1981 s = splnet();
1982
1983 switch (command) {
1984 case SIOCSIFMTU:
1985 if (ifr->ifr_mtu > VGE_JUMBO_MTU)
1986 error = EINVAL;
1987 else ifp->if_mtu = ifr->ifr_mtu;
1988 break;
1989 case SIOCSIFFLAGS:
1990 if (ifp->if_flags & IFF_UP) {
1991 if (ifp->if_flags & IFF_RUNNING &&
1992 ifp->if_flags & IFF_PROMISC &&
1993 !(sc->vge_if_flags & IFF_PROMISC)) {
1994 CSR_SETBIT_1(sc, VGE_RXCTL,
1995 VGE_RXCTL_RX_PROMISC);
1996 vge_setmulti(sc);
1997 } else if (ifp->if_flags & IFF_RUNNING &&
1998 !(ifp->if_flags & IFF_PROMISC) &&
1999 sc->vge_if_flags & IFF_PROMISC) {
2000 CSR_CLRBIT_1(sc, VGE_RXCTL,
2001 VGE_RXCTL_RX_PROMISC);
2002 vge_setmulti(sc);
2003 } else
2004 vge_init(ifp);
2005 } else {
2006 if (ifp->if_flags & IFF_RUNNING)
2007 vge_stop(sc);
2008 }
2009 sc->vge_if_flags = ifp->if_flags;
2010 break;
2011 case SIOCADDMULTI:
2012 case SIOCDELMULTI:
2013 error = (command == SIOCADDMULTI) ?
2014 ether_addmulti(ifr, &sc->sc_ethercom) :
2015 ether_delmulti(ifr, &sc->sc_ethercom);
2016
2017 if (error == ENETRESET) {
2018 /*
2019 * Multicast list has changed; set the hardware filter
2020 * accordingly.
2021 */
2022 if (ifp->if_flags & IFF_RUNNING)
2023 vge_setmulti(sc);
2024 error = 0;
2025 }
2026 break;
2027 case SIOCGIFMEDIA:
2028 case SIOCSIFMEDIA:
2029 mii = &sc->sc_mii;
2030 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2031 break;
2032 default:
2033 error = ether_ioctl(ifp, command, data);
2034 break;
2035 }
2036
2037 splx(s);
2038 return error;
2039 }
2040
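/*
 * Watchdog: a transmission set up by vge_start() failed to complete
 * within the timeout, so reap what we can and reinitialize the chip.
 */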
2041 static void
2042 vge_watchdog(struct ifnet *ifp)
2043 {
2044 struct vge_softc *sc;
2045
2046 sc = ifp->if_softc;
2047 VGE_LOCK(sc);
2048 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2049 ifp->if_oerrors++;
2050
2051 vge_txeof(sc);
2052 vge_rxeof(sc);
2053
2054 vge_init(ifp);
2055
2056 VGE_UNLOCK(sc);
2057 }
2058
2059 /*
2060 * Stop the adapter and free any mbufs allocated to the
2061 * RX and TX lists.
2062 */
2063 static void
2064 vge_stop(struct vge_softc *sc)
2065 {
2066 int i;
2067 struct ifnet *ifp;
2068
2069 ifp = &sc->sc_ethercom.ec_if;
2070
2071 VGE_LOCK(sc);
2072 ifp->if_timer = 0;
2073
2074 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2075 #ifdef DEVICE_POLLING
2076 ether_poll_deregister(ifp);
2077 #endif /* DEVICE_POLLING */
2078
2079 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2080 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2081 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2082 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2083 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2084 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2085
2086 if (sc->vge_head != NULL) {
2087 m_freem(sc->vge_head);
2088 sc->vge_head = sc->vge_tail = NULL;
2089 }
2090
2091 /* Free the TX list buffers. */
2092
2093 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2094 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2095 bus_dmamap_unload(sc->vge_dmat,
2096 sc->vge_ldata.vge_tx_dmamap[i]);
2097 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2098 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2099 }
2100 }
2101
2102 /* Free the RX list buffers. */
2103
2104 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2105 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2106 bus_dmamap_unload(sc->vge_dmat,
2107 sc->vge_ldata.vge_rx_dmamap[i]);
2108 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2109 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2110 }
2111 }
2112
2113 VGE_UNLOCK(sc);
2114 }
2115
2116 #if VGE_POWER_MANAGEMENT
2117 /*
2118 * Device suspend routine. Stop the interface and save some PCI
2119 * settings in case the BIOS doesn't restore them properly on
2120 * resume.
2121 */
2122 static int
2123 vge_suspend(struct device *dev)
2124 {
2125 struct vge_softc *sc;
2126 int i;
2127
2128 sc = device_get_softc(dev);
2129
2130 vge_stop(sc);
2131
2132 for (i = 0; i < 5; i++)
2133 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2134 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2135 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2136 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2137 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2138
2139 sc->suspended = 1;
2140
2141 return 0;
2142 }
2143
2144 /*
2145 * Device resume routine. Restore some PCI settings in case the BIOS
2146 * doesn't, re-enable busmastering, and restart the interface if
2147 * appropriate.
2148 */
2149 static int
2150 vge_resume(struct device *dev)
2151 {
2152 struct vge_softc *sc;
2153 struct ifnet *ifp;
2154 int i;
2155
2156 sc = (void *)dev;
2157 ifp = &sc->sc_ethercom.ec_if;
2158
2159 /* better way to do this? */
2160 for (i = 0; i < 5; i++)
2161 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2162 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2163 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2164 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2165 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2166
2167 /* reenable busmastering */
2168 pci_enable_busmaster(dev);
2169 pci_enable_io(dev, SYS_RES_MEMORY);
2170
2171 /* reinitialize interface if necessary */
2172 if (ifp->if_flags & IFF_UP)
2173 vge_init(sc);
2174
2175 sc->suspended = 0;
2176
2177 return 0;
2178 }
2179 #endif
2180
2181 /*
2182 * Stop all chip I/O so that the kernel's probe routines don't
2183 * get confused by errant DMAs when rebooting.
2184 */
2185 static void
2186 vge_shutdown(void *arg)
2187 {
2188 struct vge_softc *sc;
2189
2190 sc = arg;
2191 vge_stop(sc);
2192 }
2193