1 /* $NetBSD: if_vge.c,v 1.20 2006/10/21 16:26:35 tsutsui Exp $ */
2
3 /*-
4 * Copyright (c) 2004
5 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.20 2006/10/21 16:26:35 tsutsui Exp $");
39
40 /*
41 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
42 *
43 * Written by Bill Paul <wpaul (at) windriver.com>
44 * Senior Networking Software Engineer
45 * Wind River Systems
46 */
47
48 /*
49 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
50 * combines a tri-speed ethernet MAC and PHY, with the following
51 * features:
52 *
53 * o Jumbo frame support up to 16K
54 * o Transmit and receive flow control
55 * o IPv4 checksum offload
56 * o VLAN tag insertion and stripping
57 * o TCP large send
58 * o 64-bit multicast hash table filter
59 * o 64 entry CAM filter
60 * o 16K RX FIFO and 48K TX FIFO memory
61 * o Interrupt moderation
62 *
63 * The VT6122 supports up to four transmit DMA queues. The descriptors
64 * in the transmit ring can address up to 7 data fragments; frames which
65 * span more than 7 data buffers must be coalesced, but in general the
66 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
67 * long. The receive descriptors address only a single buffer.
68 *
69 * There are two peculiar design issues with the VT6122. One is that
70 * receive data buffers must be aligned on a 32-bit boundary. This is
71 * not a problem where the VT6122 is used as a LOM device in x86-based
72 * systems, but on architectures that generate unaligned access traps, we
73 * have to do some copying.
74 *
75 * The other issue has to do with the way 64-bit addresses are handled.
76 * The DMA descriptors only allow you to specify 48 bits of addressing
77 * information. The remaining 16 bits are specified using one of the
78 * I/O registers. If you only have a 32-bit system, then this isn't
79 * an issue, but if you have a 64-bit system and more than 4GB of
80 * memory, you have to make sure your network data buffers reside
81 * in the same 48-bit 'segment.'
82 *
83 * Special thanks to Ryan Fu at VIA Networking for providing documentation
84 * and sample NICs for testing.
85 */
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/if_ether.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <machine/bus.h>
107
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_vgereg.h>
116 #include <dev/pci/if_vgevar.h>
117
118 static int vge_probe(struct device *, struct cfdata *, void *);
119 static void vge_attach(struct device *, struct device *, void *);
120
121 static int vge_encap(struct vge_softc *, struct mbuf *, int);
122
123 static int vge_allocmem(struct vge_softc *);
124 static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
125 static int vge_rx_list_init(struct vge_softc *);
126 static int vge_tx_list_init(struct vge_softc *);
127 #ifndef __NO_STRICT_ALIGNMENT
128 static inline void vge_fixup_rx(struct mbuf *);
129 #endif
130 static void vge_rxeof(struct vge_softc *);
131 static void vge_txeof(struct vge_softc *);
132 static int vge_intr(void *);
133 static void vge_tick(void *);
134 static void vge_start(struct ifnet *);
135 static int vge_ioctl(struct ifnet *, u_long, caddr_t);
136 static int vge_init(struct ifnet *);
137 static void vge_stop(struct vge_softc *);
138 static void vge_watchdog(struct ifnet *);
139 #if VGE_POWER_MANAGEMENT
140 static int vge_suspend(struct device *);
141 static int vge_resume(struct device *);
142 #endif
143 static void vge_shutdown(void *);
144 static int vge_ifmedia_upd(struct ifnet *);
145 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
146
147 static uint16_t vge_read_eeprom(struct vge_softc *, int);
148
149 static void vge_miipoll_start(struct vge_softc *);
150 static void vge_miipoll_stop(struct vge_softc *);
151 static int vge_miibus_readreg(struct device *, int, int);
152 static void vge_miibus_writereg(struct device *, int, int, int);
153 static void vge_miibus_statchg(struct device *);
154
155 static void vge_cam_clear(struct vge_softc *);
156 static int vge_cam_set(struct vge_softc *, uint8_t *);
157 static void vge_setmulti(struct vge_softc *);
158 static void vge_reset(struct vge_softc *);
159
160 #define VGE_PCI_LOIO 0x10
161 #define VGE_PCI_LOMEM 0x14
162
163 CFATTACH_DECL(vge, sizeof(struct vge_softc),
164 vge_probe, vge_attach, NULL, NULL);
165
166 /*
167 * Defragment mbuf chain contents to be as linear as possible.
168 * Returns new mbuf chain on success, NULL on failure. Old mbuf
169 * chain is always freed.
170 * XXX temporary until there is a generic function doing this.
171 */
172 #define m_defrag vge_m_defrag
173 struct mbuf * vge_m_defrag(struct mbuf *, int);
174
175 struct mbuf *
176 vge_m_defrag(struct mbuf *mold, int flags)
177 {
178 struct mbuf *m0, *mn, *n;
179 size_t sz = mold->m_pkthdr.len;
180
181 #ifdef DIAGNOSTIC
182 if ((mold->m_flags & M_PKTHDR) == 0)
183 panic("m_defrag: not a mbuf chain header");
184 #endif
185
186 MGETHDR(m0, flags, MT_DATA);
187 if (m0 == NULL)
188 return NULL;
189 m0->m_pkthdr.len = mold->m_pkthdr.len;
190 mn = m0;
191
192 do {
193 if (sz > MHLEN) {
194 MCLGET(mn, M_DONTWAIT);
195 if ((mn->m_flags & M_EXT) == 0) {
196 m_freem(m0);
197 return NULL;
198 }
199 }
200
201 mn->m_len = MIN(sz, MCLBYTES);
202
203 m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
204 mtod(mn, caddr_t));
205
206 sz -= mn->m_len;
207
208 if (sz > 0) {
209 /* need more mbufs */
210 MGET(n, M_NOWAIT, MT_DATA);
211 if (n == NULL) {
212 m_freem(m0);
213 return NULL;
214 }
215
216 mn->m_next = n;
217 mn = n;
218 }
219 } while (sz > 0);
220
221 return m0;
222 }
223
224 /*
225 * Read a word of data stored in the EEPROM at address 'addr.'
226 */
227 static uint16_t
228 vge_read_eeprom(struct vge_softc *sc, int addr)
229 {
230 int i;
231 uint16_t word = 0;
232
233 /*
234 * Enter EEPROM embedded programming mode. In order to
235 * access the EEPROM at all, we first have to set the
236 * EELOAD bit in the CHIPCFG2 register.
237 */
238 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
239 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
240
241 /* Select the address of the word we want to read */
242 CSR_WRITE_1(sc, VGE_EEADDR, addr);
243
244 /* Issue read command */
245 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
246
247 /* Wait for the done bit to be set. */
248 for (i = 0; i < VGE_TIMEOUT; i++) {
249 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
250 break;
251 }
252
253 if (i == VGE_TIMEOUT) {
254 printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
255 return 0;
256 }
257
258 /* Read the result */
259 word = CSR_READ_2(sc, VGE_EERDDAT);
260
261 /* Turn off EEPROM access mode. */
262 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
263 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
264
265 return word;
266 }
267
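/*
* Stop MII autopolling so that the MII management registers can be
* accessed directly.
*/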
268 static void
269 vge_miipoll_stop(struct vge_softc *sc)
270 {
271 int i;
272
273 CSR_WRITE_1(sc, VGE_MIICMD, 0);
274
275 for (i = 0; i < VGE_TIMEOUT; i++) {
276 DELAY(1);
277 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
278 break;
279 }
280
281 if (i == VGE_TIMEOUT) {
282 printf("%s: failed to idle MII autopoll\n",
283 sc->sc_dev.dv_xname);
284 }
285 }
286
287 static void
288 vge_miipoll_start(struct vge_softc *sc)
289 {
290 int i;
291
292 /* First, make sure we're idle. */
293
294 CSR_WRITE_1(sc, VGE_MIICMD, 0);
295 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
296
297 for (i = 0; i < VGE_TIMEOUT; i++) {
298 DELAY(1);
299 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
300 break;
301 }
302
303 if (i == VGE_TIMEOUT) {
304 printf("%s: failed to idle MII autopoll\n",
305 sc->sc_dev.dv_xname);
306 return;
307 }
308
309 /* Now enable auto poll mode. */
310
311 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
312
313 /* And make sure it started. */
314
315 for (i = 0; i < VGE_TIMEOUT; i++) {
316 DELAY(1);
317 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
318 break;
319 }
320
321 if (i == VGE_TIMEOUT) {
322 printf("%s: failed to start MII autopoll\n",
323 sc->sc_dev.dv_xname);
324 }
325 }
326
327 static int
328 vge_miibus_readreg(struct device *dev, int phy, int reg)
329 {
330 struct vge_softc *sc;
331 int i;
332 uint16_t rval;
333
334 sc = (void *)dev;
335 rval = 0;
336 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
337 return 0;
338
339 VGE_LOCK(sc);
340 vge_miipoll_stop(sc);
341
342 /* Specify the register we want to read. */
343 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
344
345 /* Issue read command. */
346 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
347
348 /* Wait for the read command bit to self-clear. */
349 for (i = 0; i < VGE_TIMEOUT; i++) {
350 DELAY(1);
351 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
352 break;
353 }
354
355 if (i == VGE_TIMEOUT)
356 printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
357 else
358 rval = CSR_READ_2(sc, VGE_MIIDATA);
359
360 vge_miipoll_start(sc);
361 VGE_UNLOCK(sc);
362
363 return rval;
364 }
365
366 static void
367 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
368 {
369 struct vge_softc *sc;
370 int i;
371
372 sc = (void *)dev;
373 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
374 return;
375
376 VGE_LOCK(sc);
377 vge_miipoll_stop(sc);
378
379 /* Specify the register we want to write. */
380 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
381
382 /* Specify the data we want to write. */
383 CSR_WRITE_2(sc, VGE_MIIDATA, data);
384
385 /* Issue write command. */
386 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
387
388 /* Wait for the write command bit to self-clear. */
389 for (i = 0; i < VGE_TIMEOUT; i++) {
390 DELAY(1);
391 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
392 break;
393 }
394
395 if (i == VGE_TIMEOUT) {
396 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
397 }
398
399 vge_miipoll_start(sc);
400 VGE_UNLOCK(sc);
401 }
402
403 static void
404 vge_cam_clear(struct vge_softc *sc)
405 {
406 int i;
407
408 /*
409 * Turn off all the mask bits. This tells the chip
410 * that none of the entries in the CAM filter are valid.
411 * Desired entries will be enabled as we fill the filter in.
412 */
413
414 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
415 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
416 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
417 for (i = 0; i < 8; i++)
418 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
419
420 /* Clear the VLAN filter too. */
421
422 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
423 for (i = 0; i < 8; i++)
424 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
425
426 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
427 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
428 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
429
430 sc->vge_camidx = 0;
431 }
432
433 static int
434 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
435 {
436 int i, error;
437
438 error = 0;
439
440 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
441 return ENOSPC;
442
443 /* Select the CAM data page. */
444 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
445 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
446
447 /* Set the filter entry we want to update and enable writing. */
448 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
449
450 /* Write the address to the CAM registers */
451 for (i = 0; i < ETHER_ADDR_LEN; i++)
452 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
453
454 /* Issue a write command. */
455 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
456
457 /* Wait for it to clear. */
458 for (i = 0; i < VGE_TIMEOUT; i++) {
459 DELAY(1);
460 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
461 break;
462 }
463
464 if (i == VGE_TIMEOUT) {
465 printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
466 error = EIO;
467 goto fail;
468 }
469
470 /* Select the CAM mask page. */
471 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
472 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
473
474 /* Set the mask bit that enables this filter. */
475 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx / 8),
476 1 << (sc->vge_camidx & 7));
477
478 sc->vge_camidx++;
479
480 fail:
481 /* Turn off access to CAM. */
482 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
483 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
484 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
485
486 return error;
487 }
488
489 /*
490 * Program the multicast filter. We use the 64-entry CAM filter
491 * for perfect filtering. If there are more than 64 multicast addresses,
492 * we use the hash filter instead.
493 */
494 static void
495 vge_setmulti(struct vge_softc *sc)
496 {
497 struct ifnet *ifp;
498 int error;
499 uint32_t h, hashes[2] = { 0, 0 };
500 struct ether_multi *enm;
501 struct ether_multistep step;
502
503 error = 0;
504 ifp = &sc->sc_ethercom.ec_if;
505
506 /* First, zot all the multicast entries. */
507 vge_cam_clear(sc);
508 CSR_WRITE_4(sc, VGE_MAR0, 0);
509 CSR_WRITE_4(sc, VGE_MAR1, 0);
510 ifp->if_flags &= ~IFF_ALLMULTI;
511
512 /*
513 * If the user wants allmulti or promisc mode, enable reception
514 * of all multicast frames.
515 */
516 if (ifp->if_flags & IFF_PROMISC) {
517 allmulti:
518 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
519 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
520 ifp->if_flags |= IFF_ALLMULTI;
521 return;
522 }
523
524 /* Now program new ones */
525 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
526 while (enm != NULL) {
527 /*
528 * If multicast range, fall back to ALLMULTI.
529 */
530 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
531 ETHER_ADDR_LEN) != 0)
532 goto allmulti;
533
534 error = vge_cam_set(sc, enm->enm_addrlo);
535 if (error)
536 break;
537
538 ETHER_NEXT_MULTI(step, enm);
539 }
540
541 /* If there were too many addresses, use the hash filter. */
542 if (error) {
543 vge_cam_clear(sc);
544
545 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
546 while (enm != NULL) {
547 /*
548 * If multicast range, fall back to ALLMULTI.
549 */
550 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
551 ETHER_ADDR_LEN) != 0)
552 goto allmulti;
553
554 h = ether_crc32_be(enm->enm_addrlo,
555 ETHER_ADDR_LEN) >> 26;
556 hashes[h >> 5] |= 1 << (h & 0x1f);
557
558 ETHER_NEXT_MULTI(step, enm);
559 }
560
561 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
562 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
563 }
564 }
565
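/*
* Soft-reset the chip and reload its configuration from the EEPROM.
*/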
566 static void
567 vge_reset(struct vge_softc *sc)
568 {
569 int i;
570
571 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
572
573 for (i = 0; i < VGE_TIMEOUT; i++) {
574 DELAY(5);
575 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
576 break;
577 }
578
579 if (i == VGE_TIMEOUT) {
580 printf("%s: soft reset timed out", sc->sc_dev.dv_xname);
581 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
582 DELAY(2000);
583 }
584
585 DELAY(5000);
586
587 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
588
589 for (i = 0; i < VGE_TIMEOUT; i++) {
590 DELAY(5);
591 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
592 break;
593 }
594
595 if (i == VGE_TIMEOUT) {
596 printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
597 return;
598 }
599
600 /*
601 * On some machines, the first word read from the EEPROM can be
602 * corrupted, so read a dummy word here to work around the problem.
603 */
604 (void)vge_read_eeprom(sc, 0);
605
606 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
607 }
608
609 /*
610 * Probe for a VIA gigabit chip. Check the PCI vendor and device
611 * IDs against our list and return nonzero if we find a match.
612 */
613 static int
614 vge_probe(struct device *parent __unused, struct cfdata *match __unused,
615 void *aux)
616 {
617 struct pci_attach_args *pa = aux;
618
619 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
620 && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
621 return 1;
622
623 return 0;
624 }
625
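/*
* Allocate DMA-safe memory for the TX and RX descriptor rings and
* create DMA maps for the TX and RX buffers.
*/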
626 static int
627 vge_allocmem(struct vge_softc *sc)
628 {
629 int error;
630 int nseg;
631 int i;
632 bus_dma_segment_t seg;
633
634 /*
635 * Allocate map for TX descriptor list.
636 */
637 error = bus_dmamap_create(sc->vge_dmat,
638 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT,
639 &sc->vge_ldata.vge_tx_list_map);
640 if (error) {
641 printf("%s: could not allocate TX dma list map\n",
642 sc->sc_dev.dv_xname);
643 return ENOMEM;
644 }
645
646 /*
647 * Allocate memory for TX descriptor list.
648 */
649
650 error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
651 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
652 if (error) {
653 printf("%s: could not allocate TX ring dma memory\n",
654 sc->sc_dev.dv_xname);
655 return ENOMEM;
656 }
657
658 /* Map the memory to kernel VA space */
659
660 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_TX_LIST_SZ,
661 (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
662 if (error) {
663 printf("%s: could not map TX ring dma memory\n",
664 sc->sc_dev.dv_xname);
665 return ENOMEM;
666 }
667
668 /* Load the map for the TX ring. */
669 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
670 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
671 if (error) {
672 printf("%s: could not load TX ring dma memory\n",
673 sc->sc_dev.dv_xname);
674 return ENOMEM;
675 }
676
677 /* Create DMA maps for TX buffers */
678
679 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
680 error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
681 VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
682 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
683 &sc->vge_ldata.vge_tx_dmamap[i]);
684 if (error) {
685 printf("%s: can't create DMA map for TX\n",
686 sc->sc_dev.dv_xname);
687 return ENOMEM;
688 }
689 }
690
691 /*
692 * Allocate map for RX descriptor list.
693 */
694 error = bus_dmamap_create(sc->vge_dmat,
695 VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT,
696 &sc->vge_ldata.vge_rx_list_map);
697 if (error) {
698 printf("%s: could not allocate RX dma list map\n",
699 sc->sc_dev.dv_xname);
700 return ENOMEM;
701 }
702
703 /* Allocate DMA'able memory for the RX ring */
704
705 error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
706 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
707 if (error)
708 return ENOMEM;
709
710 /* Map the memory to kernel VA space */
711
712 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_RX_LIST_SZ,
713 (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
714 if (error)
715 return ENOMEM;
716
717 /* Load the map for the RX ring. */
718 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
719 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
720 if (error) {
721 printf("%s: could not load RX ring dma memory\n",
722 sc->sc_dev.dv_xname);
723 return ENOMEM;
724 }
725
726 /* Create DMA maps for RX buffers */
727
728 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
729 error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
730 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
731 &sc->vge_ldata.vge_rx_dmamap[i]);
732 if (error) {
733 printf("%s: can't create DMA map for RX\n",
734 sc->sc_dev.dv_xname);
735 return ENOMEM;
736 }
737 }
738
739 return 0;
740 }
741
742 /*
743 * Attach the interface. Allocate softc structures, do ifmedia
744 * setup and ethernet/BPF attach.
745 */
746 static void
747 vge_attach(struct device *parent __unused, struct device *self, void *aux)
748 {
749 uint8_t *eaddr;
750 struct vge_softc *sc = (struct vge_softc *)self;
751 struct ifnet *ifp;
752 struct pci_attach_args *pa = aux;
753 pci_chipset_tag_t pc = pa->pa_pc;
754 const char *intrstr;
755 pci_intr_handle_t ih;
756 uint16_t val;
757
758 aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
759 PCI_REVISION(pa->pa_class));
760
761 /* Make sure bus-mastering is enabled */
762 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
763 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
764 PCI_COMMAND_MASTER_ENABLE);
765
766 /*
767 * Map control/status registers.
768 */
769 if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
770 &sc->vge_btag, &sc->vge_bhandle, NULL, NULL) != 0) {
771 aprint_error("%s: couldn't map memory\n", sc->sc_dev.dv_xname);
772 return;
773 }
774
775 /*
776 * Map and establish our interrupt.
777 */
778 if (pci_intr_map(pa, &ih)) {
779 aprint_error("%s: unable to map interrupt\n",
780 sc->sc_dev.dv_xname);
781 return;
782 }
783 intrstr = pci_intr_string(pc, ih);
784 sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
785 if (sc->vge_intrhand == NULL) {
786 printf("%s: unable to establish interrupt",
787 sc->sc_dev.dv_xname);
788 if (intrstr != NULL)
789 printf(" at %s", intrstr);
790 printf("\n");
791 return;
792 }
793 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
794
795 /* Reset the adapter. */
796 vge_reset(sc);
797
798 /*
799 * Get station address from the EEPROM.
800 */
801 eaddr = sc->vge_eaddr;
802 val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
803 eaddr[0] = val & 0xff;
804 eaddr[1] = val >> 8;
805 val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
806 eaddr[2] = val & 0xff;
807 eaddr[3] = val >> 8;
808 val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
809 eaddr[4] = val & 0xff;
810 eaddr[5] = val >> 8;
811
812 printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
813 ether_sprintf(eaddr));
814
815 /*
816 * Use the 32-bit DMA tag. The hardware supports 48-bit physical
817 * addresses, but we don't use them for now.
818 */
819 sc->vge_dmat = pa->pa_dmat;
820
821 if (vge_allocmem(sc))
822 return;
823
824 ifp = &sc->sc_ethercom.ec_if;
825 ifp->if_softc = sc;
826 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
827 ifp->if_mtu = ETHERMTU;
828 ifp->if_baudrate = IF_Gbps(1);
829 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
830 ifp->if_ioctl = vge_ioctl;
831 ifp->if_start = vge_start;
832
833 /*
834 * We can support 802.1Q VLAN-sized frames and jumbo
835 * Ethernet frames.
836 */
837 sc->sc_ethercom.ec_capabilities |=
838 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
839 ETHERCAP_VLAN_HWTAGGING;
840
841 /*
842 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
843 */
844 ifp->if_capabilities |=
845 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
846 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
847 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
848
849 #ifdef DEVICE_POLLING
850 #ifdef IFCAP_POLLING
851 ifp->if_capabilities |= IFCAP_POLLING;
852 #endif
853 #endif
854 ifp->if_watchdog = vge_watchdog;
855 ifp->if_init = vge_init;
856 IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
857
858 /*
859 * Initialize our media structures and probe the MII.
860 */
861 sc->sc_mii.mii_ifp = ifp;
862 sc->sc_mii.mii_readreg = vge_miibus_readreg;
863 sc->sc_mii.mii_writereg = vge_miibus_writereg;
864 sc->sc_mii.mii_statchg = vge_miibus_statchg;
865 ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
866 vge_ifmedia_sts);
867 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
868 MII_OFFSET_ANY, MIIF_DOPAUSE);
869 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
870 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
871 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
872 } else
873 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
874
875 /*
876 * Attach the interface.
877 */
878 if_attach(ifp);
879 ether_ifattach(ifp, eaddr);
880
881 callout_init(&sc->vge_timeout);
882 callout_setfunc(&sc->vge_timeout, vge_tick, sc);
883
884 /*
885 * Make sure the interface is shutdown during reboot.
886 */
887 if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
888 printf("%s: WARNING: unable to establish shutdown hook\n",
889 sc->sc_dev.dv_xname);
890 }
891 }
892
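/*
* Attach a fresh mbuf cluster (or reload the one passed in) to the
* RX descriptor at 'idx'. Ownership of descriptors is handed back
* to the chip VGE_RXCHUNK entries at a time; see the note below.
*/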
893 static int
894 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
895 {
896 struct vge_rx_desc *d;
897 struct mbuf *m_new;
898 bus_dmamap_t map;
899 int i;
900
901 m_new = NULL;
902 if (m == NULL) {
903 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
904 if (m_new == NULL)
905 return ENOBUFS;
906
907 MCLGET(m_new, M_DONTWAIT);
908 if ((m_new->m_flags & M_EXT) == 0) {
909 m_freem(m_new);
910 return ENOBUFS;
911 }
912
913 m = m_new;
914 } else
915 m->m_data = m->m_ext.ext_buf;
916
917
918 #ifndef __NO_STRICT_ALIGNMENT
919 /*
920 * This is part of an evil trick to deal with non-x86 platforms.
921 * The VIA chip requires RX buffers to be aligned on 32-bit
922 * boundaries, but that will hose strict-alignment machines. To get
923 * around this, we leave some empty space at the start of each buffer
924 * and, on such hosts, we copy the buffer back two bytes
925 * to achieve word alignment. This is slightly more efficient
926 * than allocating a new buffer, copying the contents, and
927 * discarding the old buffer.
928 */
929 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
930 m->m_data += VGE_ETHER_ALIGN;
931 #else
932 m->m_len = m->m_pkthdr.len = MCLBYTES;
933 #endif
934 map = sc->vge_ldata.vge_rx_dmamap[idx];
935
936 if (bus_dmamap_load_mbuf(sc->vge_dmat, map, m, BUS_DMA_NOWAIT) != 0)
937 goto out;
938
939 d = &sc->vge_ldata.vge_rx_list[idx];
940
941 /* If this descriptor is still owned by the chip, bail. */
942
943 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
944 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
945 printf("%s: tried to map busy descriptor\n",
946 sc->sc_dev.dv_xname);
947 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
948 goto out;
949 }
950
951 d->vge_buflen =
952 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
953 d->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
954 d->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
955 d->vge_sts = 0;
956 d->vge_ctl = 0;
957 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
958
959 bus_dmamap_sync(sc->vge_dmat,
960 sc->vge_ldata.vge_rx_dmamap[idx],
961 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
962 BUS_DMASYNC_PREREAD);
963
964 /*
965 * Note: the manual fails to document the fact that for
966 * proper operation, the driver needs to replenish the RX
967 * DMA ring 4 descriptors at a time (rather than one at a
968 * time, like most chips). We can allocate the new buffers
969 * but we should not set the OWN bits until we're ready
970 * to hand back 4 of them in one shot.
971 */
972
973 #define VGE_RXCHUNK 4
974 sc->vge_rx_consumed++;
975 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
976 for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
977 sc->vge_ldata.vge_rx_list[i].vge_sts |=
978 htole32(VGE_RDSTS_OWN);
979 VGE_RXDESCSYNC(sc, i,
980 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
981 }
982 sc->vge_rx_consumed = 0;
983 }
984
985 sc->vge_ldata.vge_rx_mbuf[idx] = m;
986
987 return 0;
988 out:
989 if (m_new != NULL)
990 m_freem(m_new);
991 return ENOMEM;
992 }
993
994 static int
995 vge_tx_list_init(struct vge_softc *sc)
996 {
997
998 memset((char *)sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
999 bus_dmamap_sync(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
1000 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
1001 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1002
1003 memset((char *)&sc->vge_ldata.vge_tx_mbuf, 0,
1004 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1005
1006 sc->vge_ldata.vge_tx_prodidx = 0;
1007 sc->vge_ldata.vge_tx_considx = 0;
1008 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1009
1010 return 0;
1011 }
1012
1013 static int
1014 vge_rx_list_init(struct vge_softc *sc)
1015 {
1016 int i;
1017
1018 memset((char *)sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
1019 memset((char *)&sc->vge_ldata.vge_rx_mbuf, 0,
1020 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1021
1022 sc->vge_rx_consumed = 0;
1023
1024 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1025 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1026 return (ENOBUFS);
1027 }
1028
1029 sc->vge_ldata.vge_rx_prodidx = 0;
1030 sc->vge_rx_consumed = 0;
1031 sc->vge_head = sc->vge_tail = NULL;
1032
1033 return 0;
1034 }
1035
1036 #ifndef __NO_STRICT_ALIGNMENT
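/*
* Copy the received frame back two bytes to restore payload alignment
* on strict-alignment hosts; see the comment in vge_newbuf().
*/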
1037 static inline void
1038 vge_fixup_rx(struct mbuf *m)
1039 {
1040 int i;
1041 uint16_t *src, *dst;
1042
1043 src = mtod(m, uint16_t *);
1044 dst = src - 1;
1045
1046 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1047 *dst++ = *src++;
1048
1049 m->m_data -= ETHER_ALIGN;
1050 }
1051 #endif
1052
1053 /*
1054 * RX handler. We support the reception of jumbo frames that have
1055 * been fragmented across multiple 2K mbuf cluster buffers.
1056 */
1057 static void
1058 vge_rxeof(struct vge_softc *sc)
1059 {
1060 struct mbuf *m;
1061 struct ifnet *ifp;
1062 int idx, total_len, lim;
1063 struct vge_rx_desc *cur_rx;
1064 uint32_t rxstat, rxctl;
1065
1066 VGE_LOCK_ASSERT(sc);
1067 ifp = &sc->sc_ethercom.ec_if;
1068 idx = sc->vge_ldata.vge_rx_prodidx;
1069 lim = 0;
1070
1071 /* Invalidate the descriptor memory */
1072
1073 for (;;) {
1074 cur_rx = &sc->vge_ldata.vge_rx_list[idx];
1075
1076 VGE_RXDESCSYNC(sc, idx,
1077 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1078 rxstat = le32toh(cur_rx->vge_sts);
1079 if ((rxstat & VGE_RDSTS_OWN) != 0) {
1080 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1081 break;
1082 }
1083
1084 #ifdef DEVICE_POLLING
1085 if (ifp->if_flags & IFF_POLLING) {
1086 if (sc->rxcycles <= 0)
1087 break;
1088 sc->rxcycles--;
1089 }
1090 #endif /* DEVICE_POLLING */
1091
1092 m = sc->vge_ldata.vge_rx_mbuf[idx];
1093 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1094 rxctl = le32toh(cur_rx->vge_ctl);
1095
1096 /* Invalidate the RX mbuf and unload its map */
1097
1098 bus_dmamap_sync(sc->vge_dmat,
1099 sc->vge_ldata.vge_rx_dmamap[idx],
1100 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
1101 BUS_DMASYNC_POSTREAD);
1102 bus_dmamap_unload(sc->vge_dmat,
1103 sc->vge_ldata.vge_rx_dmamap[idx]);
1104
1105 /*
1106 * If the 'start of frame' bit is set, this indicates
1107 * either the first fragment in a multi-fragment receive,
1108 * or an intermediate fragment. Either way, we want to
1109 * accumulate the buffers.
1110 */
1111 if (rxstat & VGE_RXPKT_SOF) {
1112 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1113 if (sc->vge_head == NULL)
1114 sc->vge_head = sc->vge_tail = m;
1115 else {
1116 m->m_flags &= ~M_PKTHDR;
1117 sc->vge_tail->m_next = m;
1118 sc->vge_tail = m;
1119 }
1120 vge_newbuf(sc, idx, NULL);
1121 VGE_RX_DESC_INC(idx);
1122 continue;
1123 }
1124
1125 /*
1126 * Bad/error frames will have the RXOK bit cleared.
1127 * However, there's one error case we want to allow:
1128 * if a VLAN tagged frame arrives and the chip can't
1129 * match it against the CAM filter, it considers this
1130 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1131 * We don't want to drop the frame though: our VLAN
1132 * filtering is done in software.
1133 */
1134 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1135 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1136 ifp->if_ierrors++;
1137 /*
1138 * If this is part of a multi-fragment packet,
1139 * discard all the pieces.
1140 */
1141 if (sc->vge_head != NULL) {
1142 m_freem(sc->vge_head);
1143 sc->vge_head = sc->vge_tail = NULL;
1144 }
1145 vge_newbuf(sc, idx, m);
1146 VGE_RX_DESC_INC(idx);
1147 continue;
1148 }
1149
1150 /*
1151 * If allocating a replacement mbuf fails,
1152 * reload the current one.
1153 */
1154
1155 if (vge_newbuf(sc, idx, NULL)) {
1156 ifp->if_ierrors++;
1157 if (sc->vge_head != NULL) {
1158 m_freem(sc->vge_head);
1159 sc->vge_head = sc->vge_tail = NULL;
1160 }
1161 vge_newbuf(sc, idx, m);
1162 VGE_RX_DESC_INC(idx);
1163 continue;
1164 }
1165
1166 VGE_RX_DESC_INC(idx);
1167
1168 if (sc->vge_head != NULL) {
1169 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1170 /*
1171 * Special case: if there are 4 bytes or fewer
1172 * in this buffer, the mbuf can be discarded:
1173 * the last 4 bytes are the CRC, which we don't
1174 * care about anyway.
1175 */
1176 if (m->m_len <= ETHER_CRC_LEN) {
1177 sc->vge_tail->m_len -=
1178 (ETHER_CRC_LEN - m->m_len);
1179 m_freem(m);
1180 } else {
1181 m->m_len -= ETHER_CRC_LEN;
1182 m->m_flags &= ~M_PKTHDR;
1183 sc->vge_tail->m_next = m;
1184 }
1185 m = sc->vge_head;
1186 sc->vge_head = sc->vge_tail = NULL;
1187 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1188 } else
1189 m->m_pkthdr.len = m->m_len =
1190 (total_len - ETHER_CRC_LEN);
1191
1192 #ifndef __NO_STRICT_ALIGNMENT
1193 vge_fixup_rx(m);
1194 #endif
1195 ifp->if_ipackets++;
1196 m->m_pkthdr.rcvif = ifp;
1197
1198 /* Do RX checksumming if enabled */
1199 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1200
1201 /* Check IP header checksum */
1202 if (rxctl & VGE_RDCTL_IPPKT)
1203 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1204 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1205 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1206 }
1207
1208 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1209 /* Check TCP checksum */
1210 if (rxctl & VGE_RDCTL_TCPPKT)
1211 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1212
1213 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1214 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1215 }
1216
1217 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1218 /* Check UDP checksum */
1219 if (rxctl & VGE_RDCTL_UDPPKT)
1220 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1221
1222 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1223 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1224 }
1225
1226 if (rxstat & VGE_RDSTS_VTAG) {
1227 /*
1228 * We use bswap16() here because:
1229 * On LE machines, the tag is stored in BE as stream data.
1230 * On BE machines, the tag is also stored in BE as stream data,
1231 * but it was already swapped by le32toh() above.
1232 */
1233 VLAN_INPUT_TAG(ifp, m,
1234 bswap16(rxctl & VGE_RDCTL_VLANID), continue);
1235 }
1236
1237 #if NBPFILTER > 0
1238 /*
1239 * Handle BPF listeners.
1240 */
1241 if (ifp->if_bpf)
1242 bpf_mtap(ifp->if_bpf, m);
1243 #endif
1244
1245 VGE_UNLOCK(sc);
1246 (*ifp->if_input)(ifp, m);
1247 VGE_LOCK(sc);
1248
1249 lim++;
1250 if (lim == VGE_RX_DESC_CNT)
1251 break;
1252
1253 }
1254
1255 sc->vge_ldata.vge_rx_prodidx = idx;
1256 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1257 }
1258
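/*
* Transmit completion handler: reclaim descriptors the chip has
* finished with, free their mbufs and update the interface statistics.
*/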
1259 static void
1260 vge_txeof(struct vge_softc *sc)
1261 {
1262 struct ifnet *ifp;
1263 uint32_t txstat;
1264 int idx;
1265
1266 ifp = &sc->sc_ethercom.ec_if;
1267 idx = sc->vge_ldata.vge_tx_considx;
1268
1269 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1270 VGE_TXDESCSYNC(sc, idx,
1271 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1272
1273 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1274 if (txstat & VGE_TDSTS_OWN) {
1275 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1276 break;
1277 }
1278
1279 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1280 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1281 bus_dmamap_unload(sc->vge_dmat,
1282 sc->vge_ldata.vge_tx_dmamap[idx]);
1283 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1284 ifp->if_collisions++;
1285 if (txstat & VGE_TDSTS_TXERR)
1286 ifp->if_oerrors++;
1287 else
1288 ifp->if_opackets++;
1289
1290 sc->vge_ldata.vge_tx_free++;
1291 VGE_TX_DESC_INC(idx);
1292 }
1293
1294 /* No changes made to the TX ring, so no flush needed */
1295
1296 if (idx != sc->vge_ldata.vge_tx_considx) {
1297 sc->vge_ldata.vge_tx_considx = idx;
1298 ifp->if_flags &= ~IFF_OACTIVE;
1299 ifp->if_timer = 0;
1300 }
1301
1302 /*
1303 * If not all descriptors have been reaped yet,
1304 * reload the timer so that we will eventually get another
1305 * interrupt that will cause us to re-enter this routine.
1306 * This is done in case the transmitter has gone idle.
1307 */
1308 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1309 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1310 }
1311 }
1312
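/*
* One-second timer: watch the PHY for link changes and restart
* transmission when the link comes back up.
*/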
1313 static void
1314 vge_tick(void *xsc)
1315 {
1316 struct vge_softc *sc;
1317 struct ifnet *ifp;
1318 struct mii_data *mii;
1319 int s;
1320
1321 sc = xsc;
1322 ifp = &sc->sc_ethercom.ec_if;
1323 mii = &sc->sc_mii;
1324
1325 s = splnet();
1326
1327 VGE_LOCK(sc);
1328
1329 callout_schedule(&sc->vge_timeout, hz);
1330
1331 mii_tick(mii);
1332 if (sc->vge_link) {
1333 if (!(mii->mii_media_status & IFM_ACTIVE))
1334 sc->vge_link = 0;
1335 } else {
1336 if (mii->mii_media_status & IFM_ACTIVE &&
1337 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1338 sc->vge_link = 1;
1339 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1340 vge_start(ifp);
1341 }
1342 }
1343
1344 VGE_UNLOCK(sc);
1345
1346 splx(s);
1347 }
1348
1349 #ifdef DEVICE_POLLING
1350 static void
1351 vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1352 {
1353 struct vge_softc *sc = ifp->if_softc;
1354
1355 VGE_LOCK(sc);
1356 #ifdef IFCAP_POLLING
1357 if (!(ifp->if_capenable & IFCAP_POLLING)) {
1358 ether_poll_deregister(ifp);
1359 cmd = POLL_DEREGISTER;
1360 }
1361 #endif
1362 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1363 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1364 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
1365 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1366 goto done;
1367 }
1368
1369 sc->rxcycles = count;
1370 vge_rxeof(sc);
1371 vge_txeof(sc);
1372
1373 #if __FreeBSD_version < 502114
1374 if (ifp->if_snd.ifq_head != NULL)
1375 #else
1376 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1377 #endif
1378 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1379
1380 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1381 uint32_t status;
1382 status = CSR_READ_4(sc, VGE_ISR);
1383 if (status == 0xFFFFFFFF)
1384 goto done;
1385 if (status)
1386 CSR_WRITE_4(sc, VGE_ISR, status);
1387
1388 /*
1389 * XXX check behaviour on receiver stalls.
1390 */
1391
1392 if (status & VGE_ISR_TXDMA_STALL ||
1393 status & VGE_ISR_RXDMA_STALL)
1394 vge_init(sc);
1395
1396 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1397 vge_rxeof(sc);
1398 ifp->if_ierrors++;
1399 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1400 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1401 }
1402 }
1403 done:
1404 VGE_UNLOCK(sc);
1405 }
1406 #endif /* DEVICE_POLLING */
1407
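/*
* Interrupt handler: acknowledge and dispatch RX, TX and link events,
* restarting the RX queue after an overflow or when it runs out of
* descriptors.
*/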
1408 static int
1409 vge_intr(void *arg)
1410 {
1411 struct vge_softc *sc;
1412 struct ifnet *ifp;
1413 uint32_t status;
1414 int claim;
1415
1416 sc = arg;
1417 claim = 0;
1418 if (sc->suspended) {
1419 return claim;
1420 }
1421
1422 ifp = &sc->sc_ethercom.ec_if;
1423
1424 VGE_LOCK(sc);
1425
1426 if (!(ifp->if_flags & IFF_UP)) {
1427 VGE_UNLOCK(sc);
1428 return claim;
1429 }
1430
1431 #ifdef DEVICE_POLLING
1432 if (ifp->if_flags & IFF_POLLING)
1433 goto done;
1434 if (
1435 #ifdef IFCAP_POLLING
1436 (ifp->if_capenable & IFCAP_POLLING) &&
1437 #endif
1438 ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
1439 CSR_WRITE_4(sc, VGE_IMR, 0);
1440 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1441 vge_poll(ifp, 0, 1);
1442 goto done;
1443 }
1444
1445 #endif /* DEVICE_POLLING */
1446
1447 /* Disable interrupts */
1448 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1449
1450 for (;;) {
1451
1452 status = CSR_READ_4(sc, VGE_ISR);
1453 /* If the card has gone away, the read returns 0xffffffff. */
1454 if (status == 0xFFFFFFFF)
1455 break;
1456
1457 if (status) {
1458 claim = 1;
1459 CSR_WRITE_4(sc, VGE_ISR, status);
1460 }
1461
1462 if ((status & VGE_INTRS) == 0)
1463 break;
1464
1465 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1466 vge_rxeof(sc);
1467
1468 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1469 vge_rxeof(sc);
1470 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1471 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1472 }
1473
1474 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1475 vge_txeof(sc);
1476
1477 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1478 vge_init(ifp);
1479
1480 if (status & VGE_ISR_LINKSTS)
1481 vge_tick(sc);
1482 }
1483
1484 /* Re-enable interrupts */
1485 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1486
1487 #ifdef DEVICE_POLLING
1488 done:
1489 #endif
1490 VGE_UNLOCK(sc);
1491
1492 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1493 vge_start(ifp);
1494
1495 return claim;
1496 }
1497
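/*
* Map an outgoing mbuf chain into the TX descriptor at 'idx',
* coalescing the chain if it has too many fragments and padding
* runt frames to the minimum frame length.
*/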
1498 static int
1499 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1500 {
1501 struct vge_tx_desc *d;
1502 struct vge_tx_frag *f;
1503 struct mbuf *m_new;
1504 bus_dmamap_t map;
1505 int seg, error, flags;
1506 struct m_tag *mtag;
1507 size_t sz;
1508
1509 d = &sc->vge_ldata.vge_tx_list[idx];
1510
1511 /* If this descriptor is still owned by the chip, bail. */
1512 if (sc->vge_ldata.vge_tx_free <= 2) {
1513 VGE_TXDESCSYNC(sc, idx,
1514 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1515 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
1516 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1517 return ENOBUFS;
1518 }
1519 }
1520
1521 map = sc->vge_ldata.vge_tx_dmamap[idx];
1522 error = bus_dmamap_load_mbuf(sc->vge_dmat, map, m_head, BUS_DMA_NOWAIT);
1523
1524 /* If too many segments to map, coalesce */
1525 if (error == EFBIG) {
1526 m_new = m_defrag(m_head, M_DONTWAIT);
1527 if (m_new == NULL)
1528 return (error);
1529
1530 error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
1531 m_new, BUS_DMA_NOWAIT);
1532 if (error) {
1533 m_freem(m_new);
1534 return error;
1535 }
1536
1537 m_head = m_new;
1538 } else if (error)
1539 return error;
1540
1541 for (seg = 0, f = &d->vge_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1542 f->vge_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1543 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[seg].ds_addr));
1544 f->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[seg].ds_addr));
1545 }
1546
1547 /* Argh. This chip does not autopad short frames */
1548
1549 sz = m_head->m_pkthdr.len;
1550 if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1551 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
1552 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
1553 f->vge_addrhi =
1554 htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
1555 sz = VGE_MIN_FRAMELEN;
1556 seg++;
1557 }
1558 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1559
1560 /*
1561 * When telling the chip how many segments there are, we
1562 * must use nsegs + 1 instead of just nsegs. Darned if I
1563 * know why.
1564 */
1565 seg++;
1566
1567 flags = 0;
1568 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
1569 flags |= VGE_TDCTL_IPCSUM;
1570 if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1571 flags |= VGE_TDCTL_TCPCSUM;
1572 if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1573 flags |= VGE_TDCTL_UDPCSUM;
1574 d->vge_sts = htole32(sz << 16);
1575 d->vge_ctl = htole32(flags | (seg << 28) | VGE_TD_LS_NORM);
1576
1577 if (sz > ETHERMTU + ETHER_HDR_LEN)
1578 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
1579
1580 bus_dmamap_sync(sc->vge_dmat, map, 0, map->dm_mapsize,
1581 BUS_DMASYNC_PREWRITE);
1582
1583 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1584 sc->vge_ldata.vge_tx_free--;
1585
1586 /*
1587 * Set up hardware VLAN tagging.
1588 */
1589
1590 mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
1591 if (mtag != NULL) {
1592 /*
1593 * No need for htons() here since the vge(4) chip assumes
1594 * that tags are written in little endian, and
1595 * we already use htole32() here.
1596 */
1597 d->vge_ctl |= htole32(VLAN_TAG_VALUE(mtag) | VGE_TDCTL_VTAG);
1598 }
1599
1600 d->vge_sts |= htole32(VGE_TDSTS_OWN);
1601
1602 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1603
1604 return 0;
1605 }
1606
1607 /*
1608 * Main transmit routine.
1609 */
1610
1611 static void
1612 vge_start(struct ifnet *ifp)
1613 {
1614 struct vge_softc *sc;
1615 struct mbuf *m_head;
1616 int idx, pidx, error;
1617
1618 sc = ifp->if_softc;
1619 VGE_LOCK(sc);
1620
1621 if (!sc->vge_link ||
1622 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1623 VGE_UNLOCK(sc);
1624 return;
1625 }
1626
1627 m_head = NULL;
1628 idx = sc->vge_ldata.vge_tx_prodidx;
1629
1630 pidx = idx - 1;
1631 if (pidx < 0)
1632 pidx = VGE_TX_DESC_CNT - 1;
1633
1634 /*
1635 * Loop through the send queue, setting up transmit descriptors
1636 * until we drain the queue, or use up all available transmit
1637 * descriptors.
1638 */
1639 for (;;) {
1640 /* Grab a packet off the queue. */
1641 IFQ_POLL(&ifp->if_snd, m_head);
1642 if (m_head == NULL)
1643 break;
1644
1645 if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
1646 /*
1647 * Slot already used, stop for now.
1648 */
1649 ifp->if_flags |= IFF_OACTIVE;
1650 break;
1651 }
1652
1653 if ((error = vge_encap(sc, m_head, idx))) {
1654 if (error == EFBIG) {
1655 printf("%s: Tx packet consumes too many "
1656 "DMA segments, dropping...\n",
1657 sc->sc_dev.dv_xname);
1658 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1659 m_freem(m_head);
1660 continue;
1661 }
1662
1663 /*
1664 * Short on resources, just stop for now.
1665 */
1666 if (error == ENOBUFS)
1667 ifp->if_flags |= IFF_OACTIVE;
1668 break;
1669 }
1670
1671 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1672
1673 /*
1674 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1675 */
1676
1677 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1678 htole16(VGE_TXDESC_Q);
1679 VGE_TXDESCSYNC(sc, pidx,
1680 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1681
1682 if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
1683 m_freem(m_head);
1684 m_head = sc->vge_ldata.vge_tx_mbuf[idx];
1685 }
1686
1687 pidx = idx;
1688 VGE_TX_DESC_INC(idx);
1689
1690 /*
1691 * If there's a BPF listener, bounce a copy of this frame
1692 * to him.
1693 */
1694 #if NBPFILTER > 0
1695 if (ifp->if_bpf)
1696 bpf_mtap(ifp->if_bpf, m_head);
1697 #endif
1698 }
1699
1700 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1701 VGE_UNLOCK(sc);
1702 return;
1703 }
1704
1705 /* Issue a transmit command. */
1706 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1707
1708 sc->vge_ldata.vge_tx_prodidx = idx;
1709
1710 /*
1711 * Use the countdown timer for interrupt moderation.
1712 * 'TX done' interrupts are disabled. Instead, we reset the
1713 * countdown timer, which will begin counting until it hits
1714 * the value in the SSTIMER register, and then trigger an
1715 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1716 * timer count is reloaded. Only when the transmitter
1717 * is idle will the timer hit 0 and an interrupt fire.
1718 */
1719 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1720
1721 VGE_UNLOCK(sc);
1722
1723 /*
1724 * Set a timeout in case the chip goes out to lunch.
1725 */
1726 ifp->if_timer = 5;
1727 }
1728
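/*
* (Re)initialize the interface: reset the chip, set up the descriptor
* rings, program the station address and receive filter, and enable
* the MAC and interrupts.
*/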
1729 static int
1730 vge_init(struct ifnet *ifp)
1731 {
1732 struct vge_softc *sc;
1733 int i;
1734
1735 sc = ifp->if_softc;
1736
1737 VGE_LOCK(sc);
1738
1739 /*
1740 * Cancel pending I/O and free all RX/TX buffers.
1741 */
1742 vge_stop(sc);
1743 vge_reset(sc);
1744
1745 /*
1746 * Initialize the RX and TX descriptors and mbufs.
1747 */
1748
1749 vge_rx_list_init(sc);
1750 vge_tx_list_init(sc);
1751
1752 /* Set our station address */
1753 for (i = 0; i < ETHER_ADDR_LEN; i++)
1754 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);
1755
1756 /*
1757 * Set receive FIFO threshold. Also allow transmission and
1758 * reception of VLAN tagged frames.
1759 */
1760 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1761 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1762
1763 /* Set DMA burst length */
1764 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1765 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1766
1767 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1768
1769 /* Set collision backoff algorithm */
1770 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1771 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1772 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1773
1774 /* Disable LPSEL field in priority resolution */
1775 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1776
1777 /*
1778 * Load the addresses of the DMA queues into the chip.
1779 * Note that we only use one transmit queue.
1780 */
1781
1782 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1783 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr));
1784 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1785
1786 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1787 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr));
1788 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1789 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1790
1791 /* Enable and wake up the RX descriptor queue */
1792 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1793 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1794
1795 /* Enable the TX descriptor queue */
1796 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1797
1798 /* Set up the receive filter -- allow large frames for VLANs. */
1799 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1800
1801 /* If we want promiscuous mode, set the allframes bit. */
1802 if (ifp->if_flags & IFF_PROMISC) {
1803 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1804 }
1805
1806 /* Set capture broadcast bit to capture broadcast frames. */
1807 if (ifp->if_flags & IFF_BROADCAST) {
1808 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1809 }
1810
1811 /* Set multicast bit to capture multicast frames. */
1812 if (ifp->if_flags & IFF_MULTICAST) {
1813 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1814 }
1815
1816 /* Init the cam filter. */
1817 vge_cam_clear(sc);
1818
1819 /* Init the multicast filter. */
1820 vge_setmulti(sc);
1821
1822 /* Enable flow control */
1823
1824 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1825
1826 /* Enable jumbo frame reception (if desired) */
1827
1828 /* Start the MAC. */
1829 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1830 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1831 CSR_WRITE_1(sc, VGE_CRS0,
1832 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1833
1834 /*
1835 * Configure the one-shot timer for microsecond
1836 * resolution and load it for 500 usecs.
1837 */
1838 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1839 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1840
1841 /*
1842 * Configure interrupt moderation for receive. Enable
1843 * the holdoff counter and load it, and set the RX
1844 * suppression count to the number of descriptors we
1845 * want to allow before triggering an interrupt.
1846 * The holdoff timer is in units of 20 usecs.
1847 */
1848
1849 #ifdef notyet
1850 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1851 /* Select the interrupt holdoff timer page. */
1852 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1853 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1854 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1855
1856 /* Enable use of the holdoff timer. */
1857 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1858 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1859
1860 /* Select the RX suppression threshold page. */
1861 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1862 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1863 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1864
1865 /* Restore the page select bits. */
1866 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1867 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1868 #endif
1869
1870 #ifdef DEVICE_POLLING
1871 /*
1872 * Disable interrupts if we are polling.
1873 */
1874 if (ifp->if_flags & IFF_POLLING) {
1875 CSR_WRITE_4(sc, VGE_IMR, 0);
1876 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1877 } else /* otherwise ... */
1878 #endif /* DEVICE_POLLING */
1879 {
1880 /*
1881 * Enable interrupts.
1882 */
1883 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1884 CSR_WRITE_4(sc, VGE_ISR, 0);
1885 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1886 }
1887
1888 mii_mediachg(&sc->sc_mii);
1889
1890 ifp->if_flags |= IFF_RUNNING;
1891 ifp->if_flags &= ~IFF_OACTIVE;
1892
1893 sc->vge_if_flags = 0;
1894 sc->vge_link = 0;
1895
1896 VGE_UNLOCK(sc);
1897
1898 callout_schedule(&sc->vge_timeout, hz);
1899
1900 return 0;
1901 }
1902
1903 /*
1904 * Set media options.
1905 */
1906 static int
1907 vge_ifmedia_upd(struct ifnet *ifp)
1908 {
1909 struct vge_softc *sc;
1910
1911 sc = ifp->if_softc;
1912 mii_mediachg(&sc->sc_mii);
1913
1914 return 0;
1915 }
1916
1917 /*
1918 * Report current media status.
1919 */
1920 static void
1921 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1922 {
1923 struct vge_softc *sc;
1924 struct mii_data *mii;
1925
1926 sc = ifp->if_softc;
1927 mii = &sc->sc_mii;
1928
1929 mii_pollstat(mii);
1930 ifmr->ifm_active = mii->mii_media_active;
1931 ifmr->ifm_status = mii->mii_media_status;
1932 }
1933
1934 static void
1935 vge_miibus_statchg(struct device *self)
1936 {
1937 struct vge_softc *sc;
1938 struct mii_data *mii;
1939 struct ifmedia_entry *ife;
1940
1941 sc = (void *)self;
1942 mii = &sc->sc_mii;
1943 ife = mii->mii_media.ifm_cur;
1944 /*
1945 * If the user manually selects a media mode, we need to turn
1946 * on the forced MAC mode bit in the DIAGCTL register. If the
1947 * user happens to choose a full duplex mode, we also need to
1948 * set the 'force full duplex' bit. This applies only to
1949 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1950 * mode is disabled, and in 1000baseT mode, full duplex is
1951 * always implied, so we turn on the forced mode bit but leave
1952 * the FDX bit cleared.
1953 */
1954
1955 switch (IFM_SUBTYPE(ife->ifm_media)) {
1956 case IFM_AUTO:
1957 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1958 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1959 break;
1960 case IFM_1000_T:
1961 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1962 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1963 break;
1964 case IFM_100_TX:
1965 case IFM_10_T:
1966 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1967 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
1968 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1969 } else {
1970 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1971 }
1972 break;
1973 default:
1974 printf("%s: unknown media type: %x\n",
1975 sc->sc_dev.dv_xname,
1976 IFM_SUBTYPE(ife->ifm_media));
1977 break;
1978 }
1979 }
1980
1981 static int
1982 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1983 {
1984 struct vge_softc *sc;
1985 struct ifreq *ifr;
1986 struct mii_data *mii;
1987 int s, error;
1988
1989 sc = ifp->if_softc;
1990 ifr = (struct ifreq *)data;
1991 error = 0;
1992
1993 s = splnet();
1994
1995 switch (command) {
1996 case SIOCSIFMTU:
1997 if (ifr->ifr_mtu > VGE_JUMBO_MTU)
1998 error = EINVAL;
1999 else
 ifp->if_mtu = ifr->ifr_mtu;
2000 break;
2001 case SIOCSIFFLAGS:
2002 if (ifp->if_flags & IFF_UP) {
2003 if (ifp->if_flags & IFF_RUNNING &&
2004 ifp->if_flags & IFF_PROMISC &&
2005 !(sc->vge_if_flags & IFF_PROMISC)) {
2006 CSR_SETBIT_1(sc, VGE_RXCTL,
2007 VGE_RXCTL_RX_PROMISC);
2008 vge_setmulti(sc);
2009 } else if (ifp->if_flags & IFF_RUNNING &&
2010 !(ifp->if_flags & IFF_PROMISC) &&
2011 sc->vge_if_flags & IFF_PROMISC) {
2012 CSR_CLRBIT_1(sc, VGE_RXCTL,
2013 VGE_RXCTL_RX_PROMISC);
2014 vge_setmulti(sc);
2015 } else
2016 vge_init(ifp);
2017 } else {
2018 if (ifp->if_flags & IFF_RUNNING)
2019 vge_stop(sc);
2020 }
2021 sc->vge_if_flags = ifp->if_flags;
2022 break;
2023 case SIOCADDMULTI:
2024 case SIOCDELMULTI:
2025 error = (command == SIOCADDMULTI) ?
2026 ether_addmulti(ifr, &sc->sc_ethercom) :
2027 ether_delmulti(ifr, &sc->sc_ethercom);
2028
2029 if (error == ENETRESET) {
2030 /*
2031 * Multicast list has changed; set the hardware filter
2032 * accordingly.
2033 */
2034 if (ifp->if_flags & IFF_RUNNING)
2035 vge_setmulti(sc);
2036 error = 0;
2037 }
2038 break;
2039 case SIOCGIFMEDIA:
2040 case SIOCSIFMEDIA:
2041 mii = &sc->sc_mii;
2042 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2043 break;
2044 default:
2045 error = ether_ioctl(ifp, command, data);
2046 break;
2047 }
2048
2049 splx(s);
2050 return error;
2051 }
2052
2053 static void
2054 vge_watchdog(struct ifnet *ifp)
2055 {
2056 struct vge_softc *sc;
2057
2058 sc = ifp->if_softc;
2059 VGE_LOCK(sc);
2060 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2061 ifp->if_oerrors++;
2062
2063 vge_txeof(sc);
2064 vge_rxeof(sc);
2065
2066 vge_init(ifp);
2067
2068 VGE_UNLOCK(sc);
2069 }
2070
2071 /*
2072 * Stop the adapter and free any mbufs allocated to the
2073 * RX and TX lists.
2074 */
2075 static void
2076 vge_stop(struct vge_softc *sc)
2077 {
2078 int i;
2079 struct ifnet *ifp;
2080
2081 ifp = &sc->sc_ethercom.ec_if;
2082
2083 VGE_LOCK(sc);
2084 ifp->if_timer = 0;
2085
2086 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2087 #ifdef DEVICE_POLLING
2088 ether_poll_deregister(ifp);
2089 #endif /* DEVICE_POLLING */
2090
2091 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2092 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2093 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2094 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2095 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2096 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2097
2098 if (sc->vge_head != NULL) {
2099 m_freem(sc->vge_head);
2100 sc->vge_head = sc->vge_tail = NULL;
2101 }
2102
2103 /* Free the TX list buffers. */
2104
2105 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2106 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2107 bus_dmamap_unload(sc->vge_dmat,
2108 sc->vge_ldata.vge_tx_dmamap[i]);
2109 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2110 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2111 }
2112 }
2113
2114 /* Free the RX list buffers. */
2115
2116 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2117 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2118 bus_dmamap_unload(sc->vge_dmat,
2119 sc->vge_ldata.vge_rx_dmamap[i]);
2120 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2121 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2122 }
2123 }
2124
2125 VGE_UNLOCK(sc);
2126 }
2127
2128 #if VGE_POWER_MANAGEMENT
2129 /*
2130 * Device suspend routine. Stop the interface and save some PCI
2131 * settings in case the BIOS doesn't restore them properly on
2132 * resume.
2133 */
2134 static int
2135 vge_suspend(struct device *dev)
2136 {
2137 struct vge_softc *sc;
2138 int i;
2139
2140 sc = device_get_softc(dev);
2141
2142 vge_stop(sc);
2143
2144 for (i = 0; i < 5; i++)
2145 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2146 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2147 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2148 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2149 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2150
2151 sc->suspended = 1;
2152
2153 return 0;
2154 }
2155
2156 /*
2157 * Device resume routine. Restore some PCI settings in case the BIOS
2158 * doesn't, re-enable busmastering, and restart the interface if
2159 * appropriate.
2160 */
2161 static int
2162 vge_resume(struct device *dev)
2163 {
2164 struct vge_softc *sc;
2165 struct ifnet *ifp;
2166 int i;
2167
2168 sc = (void *)dev;
2169 ifp = &sc->sc_ethercom.ec_if;
2170
2171 /* better way to do this? */
2172 for (i = 0; i < 5; i++)
2173 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2174 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2175 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2176 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2177 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2178
2179 /* reenable busmastering */
2180 pci_enable_busmaster(dev);
2181 pci_enable_io(dev, SYS_RES_MEMORY);
2182
2183 /* reinitialize interface if necessary */
2184 if (ifp->if_flags & IFF_UP)
2185 vge_init(sc);
2186
2187 sc->suspended = 0;
2188
2189 return 0;
2190 }
2191 #endif
2192
2193 /*
2194 * Stop all chip I/O so that the kernel's probe routines don't
2195 * get confused by errant DMAs when rebooting.
2196 */
2197 static void
2198 vge_shutdown(void *arg)
2199 {
2200 struct vge_softc *sc;
2201
2202 sc = arg;
2203 vge_stop(sc);
2204 }
2205