1 /*	$NetBSD: if_vge.c,v 1.14 2006/10/14 19:10:35 tsutsui Exp $	*/
2
3 /*-
4 * Copyright (c) 2004
5 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.14 2006/10/14 19:10:35 tsutsui Exp $");
39
40 /*
41 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
42 *
43 * Written by Bill Paul <wpaul (at) windriver.com>
44 * Senior Networking Software Engineer
45 * Wind River Systems
46 */
47
48 /*
49 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
50 * combines a tri-speed ethernet MAC and PHY, with the following
51 * features:
52 *
53 * o Jumbo frame support up to 16K
54 * o Transmit and receive flow control
55 * o IPv4 checksum offload
56 * o VLAN tag insertion and stripping
57 * o TCP large send
58 * o 64-bit multicast hash table filter
59 * o 64 entry CAM filter
60 * o 16K RX FIFO and 48K TX FIFO memory
61 * o Interrupt moderation
62 *
63 * The VT6122 supports up to four transmit DMA queues. The descriptors
64 * in the transmit ring can address up to 7 data fragments; frames which
65 * span more than 7 data buffers must be coalesced, but in general the
66 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
67 * long. The receive descriptors address only a single buffer.
68 *
69 * There are two peculiar design issues with the VT6122. One is that
70 * receive data buffers must be aligned on a 32-bit boundary. This is
71 * not a problem where the VT6122 is used as a LOM device in x86-based
72 * systems, but on architectures that generate unaligned access traps, we
73 * have to do some copying.
74 *
75 * The other issue has to do with the way 64-bit addresses are handled.
76 * The DMA descriptors only allow you to specify 48 bits of addressing
77 * information. The remaining 16 bits are specified using one of the
78 * I/O registers. If you only have a 32-bit system, then this isn't
79 * an issue, but if you have a 64-bit system and more than 4GB of
80  * memory, you have to make sure your network data buffers reside
81 * in the same 48-bit 'segment.'
82 *
83 * Special thanks to Ryan Fu at VIA Networking for providing documentation
84 * and sample NICs for testing.
85 */
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/if_ether.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <machine/bus.h>
107
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_vgereg.h>
116 #include <dev/pci/if_vgevar.h>
117
118 static int vge_probe (struct device *, struct cfdata *, void *);
119 static void vge_attach (struct device *, struct device *, void *);
120
121 static int vge_encap (struct vge_softc *, struct mbuf *, int);
122
123 static int vge_allocmem (struct vge_softc *);
124 static int vge_newbuf (struct vge_softc *, int, struct mbuf *);
125 static int vge_rx_list_init (struct vge_softc *);
126 static int vge_tx_list_init (struct vge_softc *);
127 #ifndef __NO_STRICT_ALIGNMENT
128 static inline void vge_fixup_rx
129 (struct mbuf *);
130 #endif
131 static void vge_rxeof (struct vge_softc *);
132 static void vge_txeof (struct vge_softc *);
133 static int vge_intr (void *);
134 static void vge_tick (void *);
135 static void vge_start (struct ifnet *);
136 static int vge_ioctl (struct ifnet *, u_long, caddr_t);
137 static int vge_init (struct ifnet *);
138 static void vge_stop (struct vge_softc *);
139 static void vge_watchdog (struct ifnet *);
140 #if VGE_POWER_MANAGEMENT
141 static int vge_suspend (struct device *);
142 static int vge_resume (struct device *);
143 #endif
144 static void vge_shutdown (void *);
145 static int vge_ifmedia_upd (struct ifnet *);
146 static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
147
148 static uint16_t vge_read_eeprom (struct vge_softc *, int);
149
150 static void vge_miipoll_start (struct vge_softc *);
151 static void vge_miipoll_stop (struct vge_softc *);
152 static int vge_miibus_readreg (struct device *, int, int);
153 static void vge_miibus_writereg (struct device *, int, int, int);
154 static void vge_miibus_statchg (struct device *);
155
156 static void vge_cam_clear (struct vge_softc *);
157 static int vge_cam_set (struct vge_softc *, uint8_t *);
158 static void vge_setmulti (struct vge_softc *);
159 static void vge_reset (struct vge_softc *);
160
161 #define VGE_PCI_LOIO 0x10
162 #define VGE_PCI_LOMEM 0x14
163
164 CFATTACH_DECL(vge, sizeof(struct vge_softc),
165 vge_probe, vge_attach, NULL, NULL);
166
167 /*
168 * Defragment mbuf chain contents to be as linear as possible.
169 * Returns new mbuf chain on success, NULL on failure. Old mbuf
170 * chain is always freed.
171  * XXX temporary until there is a generic function doing this.
172 */
173 #define m_defrag vge_m_defrag
174 struct mbuf * vge_m_defrag(struct mbuf *, int);
175
176 struct mbuf *
177 vge_m_defrag(struct mbuf *mold, int flags)
178 {
179 struct mbuf *m0, *mn, *n;
180 size_t sz = mold->m_pkthdr.len;
181
182 #ifdef DIAGNOSTIC
183 if ((mold->m_flags & M_PKTHDR) == 0)
184 panic("m_defrag: not a mbuf chain header");
185 #endif
186
187 MGETHDR(m0, flags, MT_DATA);
188 if (m0 == NULL)
189 return NULL;
190 m0->m_pkthdr.len = mold->m_pkthdr.len;
191 mn = m0;
192
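	/*
	 * Copy the old chain into the new one, one mbuf (or cluster)
	 * at a time, until all of the packet data has been copied.
	 */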
193 do {
194 if (sz > MHLEN) {
195 MCLGET(mn, M_DONTWAIT);
196 if ((mn->m_flags & M_EXT) == 0) {
197 m_freem(m0);
198 return NULL;
199 }
200 }
201
202 mn->m_len = MIN(sz, MCLBYTES);
203
204 m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
205 mtod(mn, caddr_t));
206
207 sz -= mn->m_len;
208
209 if (sz > 0) {
210 /* need more mbufs */
211 MGET(n, M_NOWAIT, MT_DATA);
212 if (n == NULL) {
213 m_freem(m0);
214 return NULL;
215 }
216
217 mn->m_next = n;
218 mn = n;
219 }
220 } while (sz > 0);
221
222 return m0;
223 }
224
225 /*
226 * Read a word of data stored in the EEPROM at address 'addr.'
227 */
228 static uint16_t
229 vge_read_eeprom(struct vge_softc *sc, int addr)
230 {
231 int i;
232 uint16_t word = 0;
233
234 /*
235 * Enter EEPROM embedded programming mode. In order to
236 * access the EEPROM at all, we first have to set the
237 * EELOAD bit in the CHIPCFG2 register.
238 */
239 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
240 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
241
242 /* Select the address of the word we want to read */
243 CSR_WRITE_1(sc, VGE_EEADDR, addr);
244
245 /* Issue read command */
246 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
247
248 /* Wait for the done bit to be set. */
249 for (i = 0; i < VGE_TIMEOUT; i++) {
250 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
251 break;
252 }
253
254 if (i == VGE_TIMEOUT) {
255 printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
256 return 0;
257 }
258
259 /* Read the result */
260 word = CSR_READ_2(sc, VGE_EERDDAT);
261
262 /* Turn off EEPROM access mode. */
263 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
264 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
265
266 return word;
267 }
268
269 static void
270 vge_miipoll_stop(sc)
271 struct vge_softc *sc;
272 {
273 int i;
274
275 CSR_WRITE_1(sc, VGE_MIICMD, 0);
276
277 for (i = 0; i < VGE_TIMEOUT; i++) {
278 DELAY(1);
279 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
280 break;
281 }
282
283 if (i == VGE_TIMEOUT) {
284 printf("%s: failed to idle MII autopoll\n",
285 sc->sc_dev.dv_xname);
286 }
287
288 return;
289 }
290
291 static void
292 vge_miipoll_start(sc)
293 struct vge_softc *sc;
294 {
295 int i;
296
297 /* First, make sure we're idle. */
298
299 CSR_WRITE_1(sc, VGE_MIICMD, 0);
300 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
301
302 for (i = 0; i < VGE_TIMEOUT; i++) {
303 DELAY(1);
304 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
305 break;
306 }
307
308 if (i == VGE_TIMEOUT) {
309 printf("%s: failed to idle MII autopoll\n",
310 sc->sc_dev.dv_xname);
311 return;
312 }
313
314 /* Now enable auto poll mode. */
315
316 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
317
318 /* And make sure it started. */
319
320 for (i = 0; i < VGE_TIMEOUT; i++) {
321 DELAY(1);
322 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
323 break;
324 }
325
326 if (i == VGE_TIMEOUT) {
327 printf("%s: failed to start MII autopoll\n",
328 sc->sc_dev.dv_xname);
329 }
330 }
331
332 static int
333 vge_miibus_readreg(dev, phy, reg)
334 struct device *dev;
335 int phy, reg;
336 {
337 struct vge_softc *sc = (struct vge_softc *)dev;
338 int i;
339 u_int16_t rval = 0;
340
341 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
342 return(0);
343
344 VGE_LOCK(sc);
345 vge_miipoll_stop(sc);
346
347 /* Specify the register we want to read. */
348 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
349
350 /* Issue read command. */
351 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
352
353 /* Wait for the read command bit to self-clear. */
354 for (i = 0; i < VGE_TIMEOUT; i++) {
355 DELAY(1);
356 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
357 break;
358 }
359
360 if (i == VGE_TIMEOUT)
361 printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
362 else
363 rval = CSR_READ_2(sc, VGE_MIIDATA);
364
365 vge_miipoll_start(sc);
366 VGE_UNLOCK(sc);
367
368 return (rval);
369 }
370
371 static void
372 vge_miibus_writereg(dev, phy, reg, data)
373 struct device *dev;
374 int phy, reg, data;
375 {
376 struct vge_softc *sc = (struct vge_softc *)dev;
377 int i;
378
379 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
380 return;
381
382 VGE_LOCK(sc);
383 vge_miipoll_stop(sc);
384
385 /* Specify the register we want to write. */
386 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
387
388 /* Specify the data we want to write. */
389 CSR_WRITE_2(sc, VGE_MIIDATA, data);
390
391 /* Issue write command. */
392 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
393
394 /* Wait for the write command bit to self-clear. */
395 for (i = 0; i < VGE_TIMEOUT; i++) {
396 DELAY(1);
397 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
398 break;
399 }
400
401 if (i == VGE_TIMEOUT) {
402 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
403 }
404
405 vge_miipoll_start(sc);
406 VGE_UNLOCK(sc);
407 }
408
409 static void
410 vge_cam_clear(sc)
411 struct vge_softc *sc;
412 {
413 int i;
414
415 /*
416 * Turn off all the mask bits. This tells the chip
417 * that none of the entries in the CAM filter are valid.
418 	 * that are valid.  Desired entries will be enabled as we fill the filter in.
419 */
420
421 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
422 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
423 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
424 for (i = 0; i < 8; i++)
425 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
426
427 /* Clear the VLAN filter too. */
428
429 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
430 for (i = 0; i < 8; i++)
431 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
432
433 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
434 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
435 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
436
437 sc->vge_camidx = 0;
438
439 return;
440 }
441
442 static int
443 vge_cam_set(sc, addr)
444 struct vge_softc *sc;
445 uint8_t *addr;
446 {
447 int i, error = 0;
448
449 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
450 return(ENOSPC);
451
452 /* Select the CAM data page. */
453 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
454 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
455
456 /* Set the filter entry we want to update and enable writing. */
457 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
458
459 /* Write the address to the CAM registers */
460 for (i = 0; i < ETHER_ADDR_LEN; i++)
461 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
462
463 /* Issue a write command. */
464 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
465
466 	/* Wait for it to clear. */
467 for (i = 0; i < VGE_TIMEOUT; i++) {
468 DELAY(1);
469 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
470 break;
471 }
472
473 if (i == VGE_TIMEOUT) {
474 printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
475 error = EIO;
476 goto fail;
477 }
478
479 /* Select the CAM mask page. */
480 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
481 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
482
483 /* Set the mask bit that enables this filter. */
484 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
485 1<<(sc->vge_camidx & 7));
486
487 sc->vge_camidx++;
488
489 fail:
490 /* Turn off access to CAM. */
491 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
492 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
493 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
494
495 return (error);
496 }
497
498 /*
499 * Program the multicast filter. We use the 64-entry CAM filter
500  * for perfect filtering. If there are more than 64 multicast addresses,
501  * we use the hash filter instead.
502 */
503 static void
504 vge_setmulti(sc)
505 struct vge_softc *sc;
506 {
507 struct ifnet *ifp;
508 int error = 0;
509 u_int32_t h, hashes[2] = { 0, 0 };
510 struct ether_multi *enm;
511 struct ether_multistep step;
512
513 ifp = &sc->sc_ethercom.ec_if;
514
515 /* First, zot all the multicast entries. */
516 vge_cam_clear(sc);
517 CSR_WRITE_4(sc, VGE_MAR0, 0);
518 CSR_WRITE_4(sc, VGE_MAR1, 0);
519 ifp->if_flags &= ~IFF_ALLMULTI;
520
521 /*
522 * If the user wants allmulti or promisc mode, enable reception
523 * of all multicast frames.
524 */
525 if (ifp->if_flags & IFF_PROMISC) {
526 allmulti:
527 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
528 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
529 ifp->if_flags |= IFF_ALLMULTI;
530 return;
531 }
532
533 /* Now program new ones */
534 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
535 while(enm != NULL) {
536 /*
537 * If multicast range, fall back to ALLMULTI.
538 */
539 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
540 ETHER_ADDR_LEN) != 0)
541 goto allmulti;
542
543 error = vge_cam_set(sc, enm->enm_addrlo);
544 if (error)
545 break;
546
547 ETHER_NEXT_MULTI(step, enm);
548 }
549
550 /* If there were too many addresses, use the hash filter. */
551 if (error) {
552 vge_cam_clear(sc);
553
554 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
555 while(enm != NULL) {
556 /*
557 * If multicast range, fall back to ALLMULTI.
558 */
559 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
560 ETHER_ADDR_LEN) != 0)
561 goto allmulti;
562
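			/*
			 * Use the upper 6 bits of the big-endian CRC of
			 * the address as an index into the 64-bit hash
			 * table (MAR0/MAR1).
			 */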
563 h = ether_crc32_be(enm->enm_addrlo,
564 ETHER_ADDR_LEN) >> 26;
565 hashes[h >> 5] |= 1 << (h & 0x1f);
566
567 ETHER_NEXT_MULTI(step, enm);
568 }
569
570 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
571 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
572 }
573
574 return;
575 }
576
577 static void
578 vge_reset(sc)
579 struct vge_softc *sc;
580 {
581 register int i;
582
583 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
584
585 for (i = 0; i < VGE_TIMEOUT; i++) {
586 DELAY(5);
587 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
588 break;
589 }
590
591 if (i == VGE_TIMEOUT) {
592 		printf("%s: soft reset timed out\n", sc->sc_dev.dv_xname);
593 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
594 DELAY(2000);
595 }
596
597 DELAY(5000);
598
599 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
600
601 for (i = 0; i < VGE_TIMEOUT; i++) {
602 DELAY(5);
603 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
604 break;
605 }
606
607 if (i == VGE_TIMEOUT) {
608 printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
609 return;
610 }
611
612 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
613
614 return;
615 }
616
617 /*
618  * Probe for a VIA gigabit chip. Check the PCI vendor and device
619  * IDs against our list and return a match if we find one.
620 */
621 static int
622 vge_probe(struct device *parent __unused, struct cfdata *match __unused,
623 void *aux)
624 {
625 struct pci_attach_args *pa = aux;
626
627 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
628 && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
629 return 1;
630
631 return (0);
632 }
633
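/*
 * Allocate DMA-safe memory for the TX and RX descriptor rings, map it
 * into kernel virtual address space, and create DMA maps for the rings
 * and for each individual TX and RX buffer slot.
 */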
634 static int
635 vge_allocmem(sc)
636 struct vge_softc *sc;
637 {
638 int error;
639 int nseg;
640 int i;
641 bus_dma_segment_t seg;
642
643 /*
644 * Allocate map for TX descriptor list.
645 */
646 error = bus_dmamap_create(sc->vge_dmat,
647 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT,
648 &sc->vge_ldata.vge_tx_list_map);
649 if (error) {
650 printf("%s: could not allocate TX dma list map\n",
651 sc->sc_dev.dv_xname);
652 return (ENOMEM);
653 }
654
655 /*
656 * Allocate memory for TX descriptor list.
657 */
658
659 error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
660 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
661 if (error) {
662 printf("%s: could not allocate TX ring dma memory\n",
663 sc->sc_dev.dv_xname);
664 return (ENOMEM);
665 }
666
667 /* Map the memory to kernel VA space */
668
669 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_TX_LIST_SZ,
670 (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
671 if (error) {
672 printf("%s: could not map TX ring dma memory\n",
673 sc->sc_dev.dv_xname);
674 return (ENOMEM);
675 }
676
677 /* Load the map for the TX ring. */
678 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
679 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
680 if (error) {
681 printf("%s: could not load TX ring dma memory\n",
682 sc->sc_dev.dv_xname);
683 return (ENOMEM);
684 }
685
686 /* Create DMA maps for TX buffers */
687
688 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
689 error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
690 VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
691 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
692 &sc->vge_ldata.vge_tx_dmamap[i]);
693 if (error) {
694 printf("%s: can't create DMA map for TX\n",
695 sc->sc_dev.dv_xname);
696 return (ENOMEM);
697 }
698 }
699
700 /*
701 * Allocate map for RX descriptor list.
702 */
703 error = bus_dmamap_create(sc->vge_dmat,
704 VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT,
705 &sc->vge_ldata.vge_rx_list_map);
706 if (error) {
707 printf("%s: could not allocate RX dma list map\n",
708 sc->sc_dev.dv_xname);
709 return (ENOMEM);
710 }
711
712 /* Allocate DMA'able memory for the RX ring */
713
714 error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
715 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
716 if (error)
717 return (ENOMEM);
718
719 /* Map the memory to kernel VA space */
720
721 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_RX_LIST_SZ,
722 (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
723 if (error)
724 return (ENOMEM);
725
726 /* Load the map for the RX ring. */
727 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
728 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
729 if (error) {
730 printf("%s: could not load RX ring dma memory\n",
731 sc->sc_dev.dv_xname);
732 return (ENOMEM);
733 }
734
735 /* Create DMA maps for RX buffers */
736
737 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
738 error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
739 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
740 &sc->vge_ldata.vge_rx_dmamap[i]);
741 if (error) {
742 printf("%s: can't create DMA map for RX\n",
743 sc->sc_dev.dv_xname);
744 return (ENOMEM);
745 }
746 }
747
748 return (0);
749 }
750
751 /*
752 * Attach the interface. Allocate softc structures, do ifmedia
753 * setup and ethernet/BPF attach.
754 */
755 static void
756 vge_attach(struct device *parent __unused, struct device *self, void *aux)
757 {
758 uint8_t *eaddr;
759 struct vge_softc *sc = (struct vge_softc *)self;
760 struct ifnet *ifp;
761 struct pci_attach_args *pa = aux;
762 pci_chipset_tag_t pc = pa->pa_pc;
763 const char *intrstr;
764 pci_intr_handle_t ih;
765 uint16_t val;
766
767 aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
768 PCI_REVISION(pa->pa_class));
769
770 /* Make sure bus-mastering is enabled */
771 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
772 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
773 PCI_COMMAND_MASTER_ENABLE);
774
775 /*
776 * Map control/status registers.
777 */
778 if (0 != pci_mapreg_map(pa, VGE_PCI_LOMEM,
779 PCI_MAPREG_TYPE_MEM, 0,
780 &sc->vge_btag, &sc->vge_bhandle, NULL, NULL)) {
781 aprint_error("%s: couldn't map memory\n",
782 sc->sc_dev.dv_xname);
783 return;
784 }
785
786 /*
787 * Map and establish our interrupt.
788 */
789 if (pci_intr_map(pa, &ih)) {
790 aprint_error("%s: unable to map interrupt\n",
791 sc->sc_dev.dv_xname);
792 return;
793 }
794 intrstr = pci_intr_string(pc, ih);
795 sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
796 if (sc->vge_intrhand == NULL) {
797 printf("%s: unable to establish interrupt",
798 sc->sc_dev.dv_xname);
799 if (intrstr != NULL)
800 printf(" at %s", intrstr);
801 printf("\n");
802 return;
803 }
804 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
805
806 /* Reset the adapter. */
807 vge_reset(sc);
808
809 /*
810 * Get station address from the EEPROM.
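	 * The address is stored as three 16-bit words, low byte first.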
811 */
812 eaddr = sc->vge_eaddr;
813 val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
814 eaddr[0] = val & 0xff;
815 eaddr[1] = val >> 8;
816 val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
817 eaddr[2] = val & 0xff;
818 eaddr[3] = val >> 8;
819 val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
820 eaddr[4] = val & 0xff;
821 eaddr[5] = val >> 8;
822
823 printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
824 ether_sprintf(eaddr));
825
826 /*
827 * Use the 32bit tag. Hardware supports 48bit physical addresses,
828 * but we don't use that for now.
829 */
830 sc->vge_dmat = pa->pa_dmat;
831
832 if (vge_allocmem(sc))
833 return;
834
835 ifp = &sc->sc_ethercom.ec_if;
836 ifp->if_softc = sc;
837 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
838 ifp->if_mtu = ETHERMTU;
839 ifp->if_baudrate = IF_Gbps(1);
840 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
841 ifp->if_ioctl = vge_ioctl;
842 ifp->if_start = vge_start;
843
844 /*
845 * We can support 802.1Q VLAN-sized frames and jumbo
846 * Ethernet frames.
847 */
848 sc->sc_ethercom.ec_capabilities |=
849 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
850 ETHERCAP_VLAN_HWTAGGING;
851
852 /*
853 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
854 */
855 ifp->if_capabilities |=
856 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
857 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
858 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
859
860 #ifdef DEVICE_POLLING
861 #ifdef IFCAP_POLLING
862 ifp->if_capabilities |= IFCAP_POLLING;
863 #endif
864 #endif
865 ifp->if_watchdog = vge_watchdog;
866 ifp->if_init = vge_init;
867 IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
868
869 /*
870 * Initialize our media structures and probe the MII.
871 */
872 sc->sc_mii.mii_ifp = ifp;
873 sc->sc_mii.mii_readreg = vge_miibus_readreg;
874 sc->sc_mii.mii_writereg = vge_miibus_writereg;
875 sc->sc_mii.mii_statchg = vge_miibus_statchg;
876 ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
877 vge_ifmedia_sts);
878 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
879 MII_OFFSET_ANY, MIIF_DOPAUSE);
880 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
881 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
882 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
883 } else
884 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
885
886 /*
887 * Attach the interface.
888 */
889 if_attach(ifp);
890 ether_ifattach(ifp, eaddr);
891
892 callout_init(&sc->vge_timeout);
893 callout_setfunc(&sc->vge_timeout, vge_tick, sc);
894
895 /*
896 * Make sure the interface is shutdown during reboot.
897 */
898 if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
899 printf("%s: WARNING: unable to establish shutdown hook\n",
900 sc->sc_dev.dv_xname);
901 }
902 }
903
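/*
 * Attach an mbuf cluster to RX descriptor 'idx'.  If 'm' is NULL a
 * fresh cluster is allocated, otherwise the caller's mbuf is recycled.
 * The descriptor is not handed back to the chip (OWN bit set) until a
 * full chunk of VGE_RXCHUNK descriptors has been refilled.
 */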
904 static int
905 vge_newbuf(sc, idx, m)
906 struct vge_softc *sc;
907 int idx;
908 struct mbuf *m;
909 {
910 struct vge_rx_desc *d;
911 struct mbuf *n = NULL;
912 bus_dmamap_t map;
913 int i;
914
915 if (m == NULL) {
916 n = m_gethdr(M_DONTWAIT, MT_DATA);
917 if (n == NULL)
918 return (ENOBUFS);
919
920 m_clget(n, M_DONTWAIT);
921 if ((n->m_flags & M_EXT) == 0) {
922 m_freem(n);
923 return (ENOBUFS);
924 }
925
926 m = n;
927 } else
928 m->m_data = m->m_ext.ext_buf;
929
930
931 #ifndef __NO_STRICT_ALIGNMENT
932 /*
933 * This is part of an evil trick to deal with non-x86 platforms.
934 * The VIA chip requires RX buffers to be aligned on 32-bit
935 * boundaries, but that will hose non-x86 machines. To get around
936 * this, we leave some empty space at the start of each buffer
937 * and for non-x86 hosts, we copy the buffer back two bytes
938 * to achieve word alignment. This is slightly more efficient
939 * than allocating a new buffer, copying the contents, and
940 * discarding the old buffer.
941 */
942 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
943 m_adj(m, VGE_ETHER_ALIGN);
944 #else
945 m->m_len = m->m_pkthdr.len = MCLBYTES;
946 #endif
947 map = sc->vge_ldata.vge_rx_dmamap[idx];
948
949 if (bus_dmamap_load_mbuf(sc->vge_dmat, map, m, BUS_DMA_NOWAIT) != 0)
950 goto out;
951
952 d = &sc->vge_ldata.vge_rx_list[idx];
953
954 /* If this descriptor is still owned by the chip, bail. */
955
956 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
957 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
958 printf("%s: tried to map busy descriptor\n",
959 sc->sc_dev.dv_xname);
960 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
961 goto out;
962 }
963
964 d->vge_buflen =
965 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
966 d->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
967 d->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
968 d->vge_sts = 0;
969 d->vge_ctl = 0;
970 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
971
972 bus_dmamap_sync(sc->vge_dmat,
973 sc->vge_ldata.vge_rx_dmamap[idx],
974 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
975 BUS_DMASYNC_PREREAD);
976
977 /*
978 * Note: the manual fails to document the fact that for
979 	 * proper operation, the driver needs to replenish the RX
980 * DMA ring 4 descriptors at a time (rather than one at a
981 * time, like most chips). We can allocate the new buffers
982 * but we should not set the OWN bits until we're ready
983 * to hand back 4 of them in one shot.
984 */
985
986 #define VGE_RXCHUNK 4
987 sc->vge_rx_consumed++;
988 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
989 for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
990 sc->vge_ldata.vge_rx_list[i].vge_sts |=
991 htole32(VGE_RDSTS_OWN);
992 VGE_RXDESCSYNC(sc, i,
993 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
994 }
995 sc->vge_rx_consumed = 0;
996 }
997
998 sc->vge_ldata.vge_rx_mbuf[idx] = m;
999
1000 return (0);
1001 out:
1002 if (n != NULL)
1003 m_freem(n);
1004 return (ENOMEM);
1005 }
1006
1007 static int
1008 vge_tx_list_init(sc)
1009 struct vge_softc *sc;
1010 {
1011
1012 memset((char *)sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
1013 bus_dmamap_sync(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
1014 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
1015 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1016
1017 memset((char *)&sc->vge_ldata.vge_tx_mbuf, 0,
1018 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1019
1020 sc->vge_ldata.vge_tx_prodidx = 0;
1021 sc->vge_ldata.vge_tx_considx = 0;
1022 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1023
1024 return (0);
1025 }
1026
1027 static int
1028 vge_rx_list_init(sc)
1029 struct vge_softc *sc;
1030 {
1031 int i;
1032
1033 memset((char *)sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
1034 memset((char *)&sc->vge_ldata.vge_rx_mbuf, 0,
1035 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1036
1037 sc->vge_rx_consumed = 0;
1038
1039 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1040 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1041 return (ENOBUFS);
1042 }
1043
1044 sc->vge_ldata.vge_rx_prodidx = 0;
1045 sc->vge_rx_consumed = 0;
1046 sc->vge_head = sc->vge_tail = NULL;
1047
1048 return (0);
1049 }
1050
1051 #ifndef __NO_STRICT_ALIGNMENT
1052 static inline void
1053 vge_fixup_rx(m)
1054 struct mbuf *m;
1055 {
1056 int i;
1057 uint16_t *src, *dst;
1058
1059 src = mtod(m, uint16_t *);
1060 dst = src - 1;
1061
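	/*
	 * Slide the packet contents back by two bytes (ETHER_ALIGN),
	 * one 16-bit word at a time, so the IP header following the
	 * 14-byte Ethernet header ends up 32-bit aligned.
	 */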
1062 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1063 *dst++ = *src++;
1064
1065 m->m_data -= ETHER_ALIGN;
1066
1067 return;
1068 }
1069 #endif
1070
1071 /*
1072 * RX handler. We support the reception of jumbo frames that have
1073 * been fragmented across multiple 2K mbuf cluster buffers.
1074 */
1075 static void
1076 vge_rxeof(sc)
1077 struct vge_softc *sc;
1078 {
1079 struct mbuf *m;
1080 struct ifnet *ifp;
1081 int idx, total_len;
1082 int lim = 0;
1083 struct vge_rx_desc *cur_rx;
1084 u_int32_t rxstat, rxctl;
1085
1086 VGE_LOCK_ASSERT(sc);
1087 ifp = &sc->sc_ethercom.ec_if;
1088 idx = sc->vge_ldata.vge_rx_prodidx;
1089
1090 /* Invalidate the descriptor memory */
1091
1092 for (;;) {
1093 cur_rx = &sc->vge_ldata.vge_rx_list[idx];
1094
1095 VGE_RXDESCSYNC(sc, idx,
1096 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1097 rxstat = le32toh(cur_rx->vge_sts);
1098 if ((rxstat & VGE_RDSTS_OWN) != 0) {
1099 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1100 break;
1101 }
1102
1103 #ifdef DEVICE_POLLING
1104 if (ifp->if_flags & IFF_POLLING) {
1105 if (sc->rxcycles <= 0)
1106 break;
1107 sc->rxcycles--;
1108 }
1109 #endif /* DEVICE_POLLING */
1110
1111 m = sc->vge_ldata.vge_rx_mbuf[idx];
1112 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1113 rxctl = le32toh(cur_rx->vge_ctl);
1114
1115 /* Invalidate the RX mbuf and unload its map */
1116
1117 bus_dmamap_sync(sc->vge_dmat,
1118 sc->vge_ldata.vge_rx_dmamap[idx],
1119 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
1120 BUS_DMASYNC_POSTREAD);
1121 bus_dmamap_unload(sc->vge_dmat,
1122 sc->vge_ldata.vge_rx_dmamap[idx]);
1123
1124 /*
1125 * If the 'start of frame' bit is set, this indicates
1126 * either the first fragment in a multi-fragment receive,
1127 * or an intermediate fragment. Either way, we want to
1128 * accumulate the buffers.
1129 */
1130 if (rxstat & VGE_RXPKT_SOF) {
1131 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1132 if (sc->vge_head == NULL)
1133 sc->vge_head = sc->vge_tail = m;
1134 else {
1135 m->m_flags &= ~M_PKTHDR;
1136 sc->vge_tail->m_next = m;
1137 sc->vge_tail = m;
1138 }
1139 vge_newbuf(sc, idx, NULL);
1140 VGE_RX_DESC_INC(idx);
1141 continue;
1142 }
1143
1144 /*
1145 * Bad/error frames will have the RXOK bit cleared.
1146 * However, there's one error case we want to allow:
1147 * if a VLAN tagged frame arrives and the chip can't
1148 * match it against the CAM filter, it considers this
1149 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1150 * We don't want to drop the frame though: our VLAN
1151 * filtering is done in software.
1152 */
1153 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1154 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1155 ifp->if_ierrors++;
1156 /*
1157 * If this is part of a multi-fragment packet,
1158 * discard all the pieces.
1159 */
1160 if (sc->vge_head != NULL) {
1161 m_freem(sc->vge_head);
1162 sc->vge_head = sc->vge_tail = NULL;
1163 }
1164 vge_newbuf(sc, idx, m);
1165 VGE_RX_DESC_INC(idx);
1166 continue;
1167 }
1168
1169 /*
1170 * If allocating a replacement mbuf fails,
1171 * reload the current one.
1172 */
1173
1174 if (vge_newbuf(sc, idx, NULL)) {
1175 ifp->if_ierrors++;
1176 if (sc->vge_head != NULL) {
1177 m_freem(sc->vge_head);
1178 sc->vge_head = sc->vge_tail = NULL;
1179 }
1180 vge_newbuf(sc, idx, m);
1181 VGE_RX_DESC_INC(idx);
1182 continue;
1183 }
1184
1185 VGE_RX_DESC_INC(idx);
1186
1187 if (sc->vge_head != NULL) {
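			/*
			 * Last buffer of a multi-fragment frame: its
			 * length is whatever remains after the preceding
			 * full-sized fragments.
			 */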
1188 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1189 /*
1190 			 * Special case: if there are 4 bytes or less
1191 			 * in this buffer, the mbuf can be discarded:
1192 			 * the last 4 bytes are the CRC, which we don't
1193 * care about anyway.
1194 */
1195 if (m->m_len <= ETHER_CRC_LEN) {
1196 sc->vge_tail->m_len -=
1197 (ETHER_CRC_LEN - m->m_len);
1198 m_freem(m);
1199 } else {
1200 m->m_len -= ETHER_CRC_LEN;
1201 m->m_flags &= ~M_PKTHDR;
1202 sc->vge_tail->m_next = m;
1203 }
1204 m = sc->vge_head;
1205 sc->vge_head = sc->vge_tail = NULL;
1206 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1207 } else
1208 m->m_pkthdr.len = m->m_len =
1209 (total_len - ETHER_CRC_LEN);
1210
1211 #ifndef __NO_STRICT_ALIGNMENT
1212 vge_fixup_rx(m);
1213 #endif
1214 ifp->if_ipackets++;
1215 m->m_pkthdr.rcvif = ifp;
1216
1217 /* Do RX checksumming if enabled */
1218 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1219
1220 /* Check IP header checksum */
1221 if (rxctl & VGE_RDCTL_IPPKT)
1222 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1223 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1224 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1225 }
1226
1227 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1228 			/* Check TCP checksum */
1229 if (rxctl & VGE_RDCTL_TCPPKT)
1230 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1231
1232 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1233 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1234 }
1235
1236 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1237 /* Check UDP checksum */
1238 if (rxctl & VGE_RDCTL_UDPPKT)
1239 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1240
1241 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1242 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1243 }
1244
1245 if (rxstat & VGE_RDSTS_VTAG)
1246 VLAN_INPUT_TAG(ifp, m,
1247 ntohs((rxctl & VGE_RDCTL_VLANID)), continue);
1248
1249 #if NBPFILTER > 0
1250 /*
1251 * Handle BPF listeners.
1252 */
1253 if (ifp->if_bpf)
1254 bpf_mtap(ifp->if_bpf, m);
1255 #endif
1256
1257 VGE_UNLOCK(sc);
1258 (*ifp->if_input)(ifp, m);
1259 VGE_LOCK(sc);
1260
1261 lim++;
1262 if (lim == VGE_RX_DESC_CNT)
1263 break;
1264
1265 }
1266
1267 sc->vge_ldata.vge_rx_prodidx = idx;
1268 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1269 }
1270
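/*
 * Transmit completion handler: free mbufs and reclaim descriptors for
 * frames that the chip has finished sending.
 */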
1271 static void
1272 vge_txeof(sc)
1273 struct vge_softc *sc;
1274 {
1275 struct ifnet *ifp;
1276 u_int32_t txstat;
1277 int idx;
1278
1279 ifp = &sc->sc_ethercom.ec_if;
1280 idx = sc->vge_ldata.vge_tx_considx;
1281
1282 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1283 VGE_TXDESCSYNC(sc, idx,
1284 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1285
1286 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1287 if (txstat & VGE_TDSTS_OWN) {
1288 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1289 break;
1290 }
1291
1292 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1293 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1294 bus_dmamap_unload(sc->vge_dmat,
1295 sc->vge_ldata.vge_tx_dmamap[idx]);
1296 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1297 ifp->if_collisions++;
1298 if (txstat & VGE_TDSTS_TXERR)
1299 ifp->if_oerrors++;
1300 else
1301 ifp->if_opackets++;
1302
1303 sc->vge_ldata.vge_tx_free++;
1304 VGE_TX_DESC_INC(idx);
1305 }
1306
1307 /* No changes made to the TX ring, so no flush needed */
1308
1309 if (idx != sc->vge_ldata.vge_tx_considx) {
1310 sc->vge_ldata.vge_tx_considx = idx;
1311 ifp->if_flags &= ~IFF_OACTIVE;
1312 ifp->if_timer = 0;
1313 }
1314
1315 /*
1316 	 * If not all descriptors have been reaped yet,
1317 * reload the timer so that we will eventually get another
1318 * interrupt that will cause us to re-enter this routine.
1319 * This is done in case the transmitter has gone idle.
1320 */
1321 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1322 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1323 }
1324 }
1325
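/*
 * Periodic (once a second) timer: drive the MII state machine, track
 * link state changes, and kick the transmitter when the link comes up.
 */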
1326 static void
1327 vge_tick(xsc)
1328 void *xsc;
1329 {
1330 struct vge_softc *sc = xsc;
1331 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1332 struct mii_data *mii = &sc->sc_mii;
1333 int s;
1334
1335 s = splnet();
1336
1337 VGE_LOCK(sc);
1338
1339 callout_schedule(&sc->vge_timeout, hz);
1340
1341 mii_tick(mii);
1342 if (sc->vge_link) {
1343 if (!(mii->mii_media_status & IFM_ACTIVE))
1344 sc->vge_link = 0;
1345 } else {
1346 if (mii->mii_media_status & IFM_ACTIVE &&
1347 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1348 sc->vge_link = 1;
1349 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1350 vge_start(ifp);
1351 }
1352 }
1353
1354 VGE_UNLOCK(sc);
1355
1356 splx(s);
1357 }
1358
1359 #ifdef DEVICE_POLLING
1360 static void
1361 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1362 {
1363 struct vge_softc *sc = ifp->if_softc;
1364
1365 VGE_LOCK(sc);
1366 #ifdef IFCAP_POLLING
1367 if (!(ifp->if_capenable & IFCAP_POLLING)) {
1368 ether_poll_deregister(ifp);
1369 cmd = POLL_DEREGISTER;
1370 }
1371 #endif
1372 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1373 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1374 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
1375 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1376 goto done;
1377 }
1378
1379 sc->rxcycles = count;
1380 vge_rxeof(sc);
1381 vge_txeof(sc);
1382
1383 #if __FreeBSD_version < 502114
1384 if (ifp->if_snd.ifq_head != NULL)
1385 #else
1386 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1387 #endif
1388 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1389
1390 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1391 u_int32_t status;
1392 status = CSR_READ_4(sc, VGE_ISR);
1393 if (status == 0xFFFFFFFF)
1394 goto done;
1395 if (status)
1396 CSR_WRITE_4(sc, VGE_ISR, status);
1397
1398 /*
1399 * XXX check behaviour on receiver stalls.
1400 */
1401
1402 if (status & VGE_ISR_TXDMA_STALL ||
1403 status & VGE_ISR_RXDMA_STALL)
1404 vge_init(sc);
1405
1406 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1407 vge_rxeof(sc);
1408 ifp->if_ierrors++;
1409 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1410 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1411 }
1412 }
1413 done:
1414 VGE_UNLOCK(sc);
1415 }
1416 #endif /* DEVICE_POLLING */
1417
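/*
 * Interrupt handler: acknowledge and service all pending interrupt
 * sources, then restart transmission if packets are waiting.
 * Returns nonzero if the interrupt was ours.
 */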
1418 static int
1419 vge_intr(arg)
1420 void *arg;
1421 {
1422 struct vge_softc *sc = arg;
1423 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1424 u_int32_t status;
1425 int claim = 0;
1426
1427 if (sc->suspended) {
1428 return claim;
1429 }
1430
1431 VGE_LOCK(sc);
1432
1433 if (!(ifp->if_flags & IFF_UP)) {
1434 VGE_UNLOCK(sc);
1435 return claim;
1436 }
1437
1438 #ifdef DEVICE_POLLING
1439 if (ifp->if_flags & IFF_POLLING)
1440 goto done;
1441 if (
1442 #ifdef IFCAP_POLLING
1443 (ifp->if_capenable & IFCAP_POLLING) &&
1444 #endif
1445 ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
1446 CSR_WRITE_4(sc, VGE_IMR, 0);
1447 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1448 vge_poll(ifp, 0, 1);
1449 goto done;
1450 }
1451
1452 #endif /* DEVICE_POLLING */
1453
1454 /* Disable interrupts */
1455 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1456
1457 for (;;) {
1458
1459 status = CSR_READ_4(sc, VGE_ISR);
1460 		/* If the card has gone away the read returns 0xffffffff. */
1461 if (status == 0xFFFFFFFF)
1462 break;
1463
1464 if (status) {
1465 claim = 1;
1466 CSR_WRITE_4(sc, VGE_ISR, status);
1467 }
1468
1469 if ((status & VGE_INTRS) == 0)
1470 break;
1471
1472 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1473 vge_rxeof(sc);
1474
1475 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1476 vge_rxeof(sc);
1477 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1478 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1479 }
1480
1481 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1482 vge_txeof(sc);
1483
1484 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1485 vge_init(ifp);
1486
1487 if (status & VGE_ISR_LINKSTS)
1488 vge_tick(sc);
1489 }
1490
1491 /* Re-enable interrupts */
1492 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1493
1494 #ifdef DEVICE_POLLING
1495 done:
1496 #endif
1497 VGE_UNLOCK(sc);
1498
1499 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1500 vge_start(ifp);
1501
1502 return claim;
1503 }
1504
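/*
 * Set up TX descriptor 'idx' for the given mbuf chain, coalescing the
 * chain first if it needs more DMA segments than one descriptor can
 * describe.  The descriptor's OWN bit is set here; vge_start() issues
 * the actual transmit command.
 */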
1505 static int
1506 vge_encap(sc, m_head, idx)
1507 struct vge_softc *sc;
1508 struct mbuf *m_head;
1509 int idx;
1510 {
1511 struct vge_tx_desc *d;
1512 struct vge_tx_frag *f;
1513 struct mbuf *m_new = NULL;
1514 bus_dmamap_t map;
1515 int seg, error, flags;
1516 struct m_tag *mtag;
1517 size_t sz;
1518
1519 d = &sc->vge_ldata.vge_tx_list[idx];
1520
1521 /* If this descriptor is still owned by the chip, bail. */
1522 if (sc->vge_ldata.vge_tx_free <= 2) {
1523 VGE_TXDESCSYNC(sc, idx,
1524 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1525 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
1526 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1527 return (ENOBUFS);
1528 }
1529 }
1530
1531 map = sc->vge_ldata.vge_tx_dmamap[idx];
1532 error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
1533 m_head, BUS_DMA_NOWAIT);
1534
1535 /* If too many segments to map, coalesce */
1536 if (error == EFBIG) {
1537 m_new = m_defrag(m_head, M_DONTWAIT);
1538 if (m_new == NULL)
1539 return (error);
1540
1541 error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
1542 m_new, BUS_DMA_NOWAIT);
1543 if (error) {
1544 m_freem(m_new);
1545 return (error);
1546 }
1547
1548 m_head = m_new;
1549 } else if (error)
1550 return (error);
1551
1552 for (seg = 0, f = &d->vge_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1553 f->vge_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1554 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[seg].ds_addr));
1555 f->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[seg].ds_addr));
1556 }
1557
1558 /* Argh. This chip does not autopad short frames */
1559
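	/*
	 * Pad short frames to VGE_MIN_FRAMELEN by appending one more
	 * fragment that points back at the start of the packet data;
	 * only the extra length matters to the chip.
	 */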
1560 sz = m_head->m_pkthdr.len;
1561 if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1562 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
1563 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
1564 f->vge_addrhi =
1565 htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
1566 sz = VGE_MIN_FRAMELEN;
1567 seg++;
1568 }
1569 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1570
1571 /*
1572 * When telling the chip how many segments there are, we
1573 * must use nsegs + 1 instead of just nsegs. Darned if I
1574 * know why.
1575 */
1576 seg++;
1577
1578 flags = 0;
1579 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
1580 flags |= VGE_TDCTL_IPCSUM;
1581 if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1582 flags |= VGE_TDCTL_TCPCSUM;
1583 if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1584 flags |= VGE_TDCTL_UDPCSUM;
1585 d->vge_sts = htole32(sz << 16);
1586 d->vge_ctl = htole32(flags | (seg << 28) | VGE_TD_LS_NORM);
1587
1588 if (sz > ETHERMTU + ETHER_HDR_LEN)
1589 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
1590
1591 bus_dmamap_sync(sc->vge_dmat, map, 0, map->dm_mapsize,
1592 BUS_DMASYNC_PREWRITE);
1593
1594 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1595 sc->vge_ldata.vge_tx_free--;
1596
1597 /*
1598 * Set up hardware VLAN tagging.
1599 */
1600
1601 mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
1602 if (mtag != NULL)
1603 d->vge_ctl |=
1604 htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);
1605
1606 d->vge_sts |= htole32(VGE_TDSTS_OWN);
1607
1608 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1609
1610 return (0);
1611 }
1612
1613 /*
1614 * Main transmit routine.
1615 */
1616
1617 static void
1618 vge_start(ifp)
1619 struct ifnet *ifp;
1620 {
1621 struct vge_softc *sc;
1622 struct mbuf *m_head = NULL;
1623 int idx, pidx = 0, error;
1624
1625 sc = ifp->if_softc;
1626 VGE_LOCK(sc);
1627
1628 if (!sc->vge_link
1629 || (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1630 VGE_UNLOCK(sc);
1631 return;
1632 }
1633
1634 idx = sc->vge_ldata.vge_tx_prodidx;
1635
1636 pidx = idx - 1;
1637 if (pidx < 0)
1638 pidx = VGE_TX_DESC_CNT - 1;
1639
1640 /*
1641 * Loop through the send queue, setting up transmit descriptors
1642 * until we drain the queue, or use up all available transmit
1643 * descriptors.
1644 */
1645 for(;;) {
1646 /* Grab a packet off the queue. */
1647 IFQ_POLL(&ifp->if_snd, m_head);
1648 if (m_head == NULL)
1649 break;
1650
1651 if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
1652 /*
1653 * Slot already used, stop for now.
1654 */
1655 ifp->if_flags |= IFF_OACTIVE;
1656 break;
1657 }
1658
1659 if ((error = vge_encap(sc, m_head, idx))) {
1660 if (error == EFBIG) {
1661 printf("%s: Tx packet consumes too many "
1662 "DMA segments, dropping...\n",
1663 sc->sc_dev.dv_xname);
1664 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1665 m_freem(m_head);
1666 continue;
1667 }
1668
1669 /*
1670 * Short on resources, just stop for now.
1671 */
1672 if (error == ENOBUFS)
1673 ifp->if_flags |= IFF_OACTIVE;
1674 break;
1675 }
1676
1677 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1678
1679 /*
1680 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1681 */
1682
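		/*
		 * Set the 'queued' bit on the previous descriptor so the
		 * chip knows another descriptor follows it in the ring.
		 */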
1683 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1684 htole16(VGE_TXDESC_Q);
1685 VGE_TXDESCSYNC(sc, pidx,
1686 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1687
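		/*
		 * If vge_encap() had to coalesce the chain, the mbuf now
		 * stored in the ring differs from the one we dequeued;
		 * free the original and use the stored chain for the
		 * BPF tap below.
		 */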
1688 if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
1689 m_freem(m_head);
1690 m_head = sc->vge_ldata.vge_tx_mbuf[idx];
1691 }
1692
1693 pidx = idx;
1694 VGE_TX_DESC_INC(idx);
1695
1696 /*
1697 * If there's a BPF listener, bounce a copy of this frame
1698 * to him.
1699 */
1700 #if NBPFILTER > 0
1701 if (ifp->if_bpf)
1702 bpf_mtap(ifp->if_bpf, m_head);
1703 #endif
1704 }
1705
1706 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1707 VGE_UNLOCK(sc);
1708 return;
1709 }
1710
1711 /* Issue a transmit command. */
1712 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1713
1714 sc->vge_ldata.vge_tx_prodidx = idx;
1715
1716 /*
1717 * Use the countdown timer for interrupt moderation.
1718 * 'TX done' interrupts are disabled. Instead, we reset the
1719 * countdown timer, which will begin counting until it hits
1720 * the value in the SSTIMER register, and then trigger an
1721 	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1722 	 * timer count is reloaded. Only when the transmitter
1723 * is idle will the timer hit 0 and an interrupt fire.
1724 */
1725 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1726
1727 VGE_UNLOCK(sc);
1728
1729 /*
1730 * Set a timeout in case the chip goes out to lunch.
1731 */
1732 ifp->if_timer = 5;
1733
1734 return;
1735 }
1736
1737 static int
1738 vge_init(ifp)
1739 struct ifnet *ifp;
1740 {
1741 struct vge_softc *sc = ifp->if_softc;
1742 struct mii_data *mii = &sc->sc_mii;
1743 int i;
1744
1745 VGE_LOCK(sc);
1746
1747 /*
1748 * Cancel pending I/O and free all RX/TX buffers.
1749 */
1750 vge_stop(sc);
1751 vge_reset(sc);
1752
1753 /*
1754 * Initialize the RX and TX descriptors and mbufs.
1755 */
1756
1757 vge_rx_list_init(sc);
1758 vge_tx_list_init(sc);
1759
1760 /* Set our station address */
1761 for (i = 0; i < ETHER_ADDR_LEN; i++)
1762 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);
1763
1764 /*
1765 * Set receive FIFO threshold. Also allow transmission and
1766 * reception of VLAN tagged frames.
1767 */
1768 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1769 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1770
1771 /* Set DMA burst length */
1772 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1773 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1774
1775 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1776
1777 /* Set collision backoff algorithm */
1778 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1779 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1780 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1781
1782 /* Disable LPSEL field in priority resolution */
1783 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1784
1785 /*
1786 * Load the addresses of the DMA queues into the chip.
1787 * Note that we only use one transmit queue.
1788 */
1789
1790 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1791 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr));
1792 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1793
1794 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1795 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr));
1796 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1797 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1798
1799 /* Enable and wake up the RX descriptor queue */
1800 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1801 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1802
1803 /* Enable the TX descriptor queue */
1804 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1805
1806 /* Set up the receive filter -- allow large frames for VLANs. */
1807 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1808
1809 /* If we want promiscuous mode, set the allframes bit. */
1810 if (ifp->if_flags & IFF_PROMISC) {
1811 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1812 }
1813
1814 /* Set capture broadcast bit to capture broadcast frames. */
1815 if (ifp->if_flags & IFF_BROADCAST) {
1816 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1817 }
1818
1819 /* Set multicast bit to capture multicast frames. */
1820 if (ifp->if_flags & IFF_MULTICAST) {
1821 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1822 }
1823
1824 /* Init the cam filter. */
1825 vge_cam_clear(sc);
1826
1827 /* Init the multicast filter. */
1828 vge_setmulti(sc);
1829
1830 /* Enable flow control */
1831
1832 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1833
1834 /* Enable jumbo frame reception (if desired) */
1835
1836 /* Start the MAC. */
1837 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1838 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1839 CSR_WRITE_1(sc, VGE_CRS0,
1840 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1841
1842 /*
1843 * Configure one-shot timer for microsecond
1844 	 * resolution and load it for 500 usecs.
1845 */
1846 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1847 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1848
1849 /*
1850 * Configure interrupt moderation for receive. Enable
1851 * the holdoff counter and load it, and set the RX
1852 * suppression count to the number of descriptors we
1853 * want to allow before triggering an interrupt.
1854 * The holdoff timer is in units of 20 usecs.
1855 */
1856
1857 #ifdef notyet
1858 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1859 /* Select the interrupt holdoff timer page. */
1860 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1861 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1862 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1863
1864 /* Enable use of the holdoff timer. */
1865 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1866 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1867
1868 /* Select the RX suppression threshold page. */
1869 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1870 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1871 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1872
1873 /* Restore the page select bits. */
1874 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1875 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1876 #endif
1877
1878 #ifdef DEVICE_POLLING
1879 /*
1880 * Disable interrupts if we are polling.
1881 */
1882 if (ifp->if_flags & IFF_POLLING) {
1883 CSR_WRITE_4(sc, VGE_IMR, 0);
1884 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1885 } else /* otherwise ... */
1886 #endif /* DEVICE_POLLING */
1887 {
1888 /*
1889 * Enable interrupts.
1890 */
1891 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1892 CSR_WRITE_4(sc, VGE_ISR, 0);
1893 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1894 }
1895
1896 mii_mediachg(mii);
1897
1898 ifp->if_flags |= IFF_RUNNING;
1899 ifp->if_flags &= ~IFF_OACTIVE;
1900
1901 sc->vge_if_flags = 0;
1902 sc->vge_link = 0;
1903
1904 VGE_UNLOCK(sc);
1905
1906 callout_schedule(&sc->vge_timeout, hz);
1907
1908 return (0);
1909 }
1910
1911 /*
1912 * Set media options.
1913 */
1914 static int
1915 vge_ifmedia_upd(ifp)
1916 struct ifnet *ifp;
1917 {
1918 struct vge_softc *sc = ifp->if_softc;
1919 struct mii_data *mii = &sc->sc_mii;
1920
1921 mii_mediachg(mii);
1922
1923 return (0);
1924 }
1925
1926 /*
1927 * Report current media status.
1928 */
1929 static void
1930 vge_ifmedia_sts(ifp, ifmr)
1931 struct ifnet *ifp;
1932 struct ifmediareq *ifmr;
1933 {
1934 struct vge_softc *sc = ifp->if_softc;
1935 struct mii_data *mii = &sc->sc_mii;
1936
1937 mii_pollstat(mii);
1938 ifmr->ifm_active = mii->mii_media_active;
1939 ifmr->ifm_status = mii->mii_media_status;
1940
1941 return;
1942 }
1943
1944 static void
1945 vge_miibus_statchg(self)
1946 struct device *self;
1947 {
1948 struct vge_softc *sc = (struct vge_softc *) self;
1949 struct mii_data *mii = &sc->sc_mii;
1950 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
1951
1952 /*
1953 * If the user manually selects a media mode, we need to turn
1954 * on the forced MAC mode bit in the DIAGCTL register. If the
1955 * user happens to choose a full duplex mode, we also need to
1956 * set the 'force full duplex' bit. This applies only to
1957 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1958 * mode is disabled, and in 1000baseT mode, full duplex is
1959 * always implied, so we turn on the forced mode bit but leave
1960 * the FDX bit cleared.
1961 */
1962
1963 switch (IFM_SUBTYPE(ife->ifm_media)) {
1964 case IFM_AUTO:
1965 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1966 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1967 break;
1968 case IFM_1000_T:
1969 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1970 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1971 break;
1972 case IFM_100_TX:
1973 case IFM_10_T:
1974 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1975 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
1976 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1977 } else {
1978 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1979 }
1980 break;
1981 default:
1982 printf("%s: unknown media type: %x\n",
1983 sc->sc_dev.dv_xname,
1984 IFM_SUBTYPE(ife->ifm_media));
1985 break;
1986 }
1987
1988 return;
1989 }
1990
1991 static int
1992 vge_ioctl(ifp, command, data)
1993 struct ifnet *ifp;
1994 u_long command;
1995 caddr_t data;
1996 {
1997 struct vge_softc *sc = ifp->if_softc;
1998 struct ifreq *ifr = (struct ifreq *) data;
1999 struct mii_data *mii;
2000 int s, error = 0;
2001
2002 s = splnet();
2003
2004 switch (command) {
2005 case SIOCSIFMTU:
2006 		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2007 			error = EINVAL;
2008 		else
			ifp->if_mtu = ifr->ifr_mtu;
2009 break;
2010 case SIOCSIFFLAGS:
2011 if (ifp->if_flags & IFF_UP) {
2012 if (ifp->if_flags & IFF_RUNNING &&
2013 ifp->if_flags & IFF_PROMISC &&
2014 !(sc->vge_if_flags & IFF_PROMISC)) {
2015 CSR_SETBIT_1(sc, VGE_RXCTL,
2016 VGE_RXCTL_RX_PROMISC);
2017 vge_setmulti(sc);
2018 } else if (ifp->if_flags & IFF_RUNNING &&
2019 !(ifp->if_flags & IFF_PROMISC) &&
2020 sc->vge_if_flags & IFF_PROMISC) {
2021 CSR_CLRBIT_1(sc, VGE_RXCTL,
2022 VGE_RXCTL_RX_PROMISC);
2023 vge_setmulti(sc);
2024 } else
2025 vge_init(ifp);
2026 } else {
2027 if (ifp->if_flags & IFF_RUNNING)
2028 vge_stop(sc);
2029 }
2030 sc->vge_if_flags = ifp->if_flags;
2031 break;
2032 case SIOCADDMULTI:
2033 case SIOCDELMULTI:
2034 error = (command == SIOCADDMULTI) ?
2035 ether_addmulti(ifr, &sc->sc_ethercom) :
2036 ether_delmulti(ifr, &sc->sc_ethercom);
2037
2038 if (error == ENETRESET) {
2039 /*
2040 * Multicast list has changed; set the hardware filter
2041 * accordingly.
2042 */
2043 if (ifp->if_flags & IFF_RUNNING)
2044 vge_setmulti(sc);
2045 error = 0;
2046 }
2047 break;
2048 case SIOCGIFMEDIA:
2049 case SIOCSIFMEDIA:
2050 mii = &sc->sc_mii;
2051 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2052 break;
2053 default:
2054 error = ether_ioctl(ifp, command, data);
2055 break;
2056 }
2057
2058 splx(s);
2059 return (error);
2060 }
2061
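/*
 * Transmit watchdog: a frame failed to complete within the timeout,
 * so reclaim whatever we can and reinitialize the chip.
 */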
2062 static void
2063 vge_watchdog(ifp)
2064 struct ifnet *ifp;
2065 {
2066 struct vge_softc *sc;
2067
2068 sc = ifp->if_softc;
2069 VGE_LOCK(sc);
2070 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2071 ifp->if_oerrors++;
2072
2073 vge_txeof(sc);
2074 vge_rxeof(sc);
2075
2076 vge_init(ifp);
2077
2078 VGE_UNLOCK(sc);
2079
2080 return;
2081 }
2082
2083 /*
2084 * Stop the adapter and free any mbufs allocated to the
2085 * RX and TX lists.
2086 */
2087 static void
2088 vge_stop(sc)
2089 struct vge_softc *sc;
2090 {
2091 register int i;
2092 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2093
2094 VGE_LOCK(sc);
2095 ifp->if_timer = 0;
2096
2097 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2098 #ifdef DEVICE_POLLING
2099 ether_poll_deregister(ifp);
2100 #endif /* DEVICE_POLLING */
2101
2102 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2103 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2104 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2105 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2106 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2107 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2108
2109 if (sc->vge_head != NULL) {
2110 m_freem(sc->vge_head);
2111 sc->vge_head = sc->vge_tail = NULL;
2112 }
2113
2114 /* Free the TX list buffers. */
2115
2116 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2117 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2118 bus_dmamap_unload(sc->vge_dmat,
2119 sc->vge_ldata.vge_tx_dmamap[i]);
2120 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2121 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2122 }
2123 }
2124
2125 /* Free the RX list buffers. */
2126
2127 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2128 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2129 bus_dmamap_unload(sc->vge_dmat,
2130 sc->vge_ldata.vge_rx_dmamap[i]);
2131 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2132 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2133 }
2134 }
2135
2136 VGE_UNLOCK(sc);
2137
2138 return;
2139 }
2140
2141 #if VGE_POWER_MANAGEMENT
2142 /*
2143 * Device suspend routine. Stop the interface and save some PCI
2144 * settings in case the BIOS doesn't restore them properly on
2145 * resume.
2146 */
2147 static int
2148 vge_suspend(dev)
2149 struct device * dev;
2150 {
2151 struct vge_softc *sc;
2152 int i;
2153
2154 sc = device_get_softc(dev);
2155
2156 vge_stop(sc);
2157
2158 for (i = 0; i < 5; i++)
2159 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2160 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2161 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2162 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2163 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2164
2165 sc->suspended = 1;
2166
2167 return (0);
2168 }
2169
2170 /*
2171 * Device resume routine. Restore some PCI settings in case the BIOS
2172 * doesn't, re-enable busmastering, and restart the interface if
2173 * appropriate.
2174 */
2175 static int
2176 vge_resume(dev)
2177 struct device * dev;
2178 {
2179 struct vge_softc *sc = (struct vge_softc *)dev;
2180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2181 int i;
2182
2183 /* better way to do this? */
2184 for (i = 0; i < 5; i++)
2185 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2186 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2187 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2188 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2189 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2190
2191 /* reenable busmastering */
2192 pci_enable_busmaster(dev);
2193 pci_enable_io(dev, SYS_RES_MEMORY);
2194
2195 /* reinitialize interface if necessary */
2196 if (ifp->if_flags & IFF_UP)
2197 vge_init(sc);
2198
2199 sc->suspended = 0;
2200
2201 return (0);
2202 }
2203 #endif
2204
2205 /*
2206 * Stop all chip I/O so that the kernel's probe routines don't
2207 * get confused by errant DMAs when rebooting.
2208 */
2209 static void
2210 vge_shutdown(arg)
2211 void *arg;
2212 {
2213 struct vge_softc *sc = (struct vge_softc *)arg;
2214
2215 vge_stop(sc);
2216 }
2217