1 /*	$NetBSD: if_vge.c,v 1.15 2006/10/14 19:53:51 tsutsui Exp $	*/
2
3 /*-
4 * Copyright (c) 2004
5 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.15 2006/10/14 19:53:51 tsutsui Exp $");
39
40 /*
41 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
42 *
43 * Written by Bill Paul <wpaul (at) windriver.com>
44 * Senior Networking Software Engineer
45 * Wind River Systems
46 */
47
48 /*
49 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
50 * combines a tri-speed ethernet MAC and PHY, with the following
51 * features:
52 *
53 * o Jumbo frame support up to 16K
54 * o Transmit and receive flow control
55 * o IPv4 checksum offload
56 * o VLAN tag insertion and stripping
57 * o TCP large send
58 * o 64-bit multicast hash table filter
59 * o 64 entry CAM filter
60 * o 16K RX FIFO and 48K TX FIFO memory
61 * o Interrupt moderation
62 *
63 * The VT6122 supports up to four transmit DMA queues. The descriptors
64 * in the transmit ring can address up to 7 data fragments; frames which
65 * span more than 7 data buffers must be coalesced, but in general the
66  * BSD TCP/IP stack rarely generates frames longer than 2 or 3
67  * fragments. The receive descriptors address only a single buffer.
68 *
69 * There are two peculiar design issues with the VT6122. One is that
70 * receive data buffers must be aligned on a 32-bit boundary. This is
71 * not a problem where the VT6122 is used as a LOM device in x86-based
72 * systems, but on architectures that generate unaligned access traps, we
73 * have to do some copying.
74 *
75 * The other issue has to do with the way 64-bit addresses are handled.
76 * The DMA descriptors only allow you to specify 48 bits of addressing
77 * information. The remaining 16 bits are specified using one of the
78 * I/O registers. If you only have a 32-bit system, then this isn't
79 * an issue, but if you have a 64-bit system and more than 4GB of
80  * memory, you have to make sure your network data buffers reside
81 * in the same 48-bit 'segment.'
82 *
83 * Special thanks to Ryan Fu at VIA Networking for providing documentation
84 * and sample NICs for testing.
85 */
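/*
 * Illustrative sketch only (an assumption for exposition, not driver
 * code): with the 48-bit descriptor addressing described above, a
 * 64-bit bus address ends up split roughly like this, with bits 48-63
 * programmed once through a shared I/O register instead of being
 * stored in each descriptor:
 *
 *	d->vge_addrlo = htole32(busaddr & 0xFFFFFFFFULL);	bits  0-31
 *	d->vge_addrhi = htole16((busaddr >> 32) & 0xFFFF);	bits 32-47
 *
 * The VGE_ADDR_LO()/VGE_ADDR_HI() macros used by vge_newbuf() and
 * vge_encap() below implement this split.
 */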
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/if_ether.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103
104 #include <net/bpf.h>
105
106 #include <machine/bus.h>
107
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_vgereg.h>
116 #include <dev/pci/if_vgevar.h>
117
118 static int vge_probe(struct device *, struct cfdata *, void *);
119 static void vge_attach(struct device *, struct device *, void *);
120
121 static int vge_encap(struct vge_softc *, struct mbuf *, int);
122
123 static int vge_allocmem(struct vge_softc *);
124 static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
125 static int vge_rx_list_init(struct vge_softc *);
126 static int vge_tx_list_init(struct vge_softc *);
127 #ifndef __NO_STRICT_ALIGNMENT
128 static inline void vge_fixup_rx(struct mbuf *);
129 #endif
130 static void vge_rxeof(struct vge_softc *);
131 static void vge_txeof(struct vge_softc *);
132 static int vge_intr(void *);
133 static void vge_tick(void *);
134 static void vge_start(struct ifnet *);
135 static int vge_ioctl(struct ifnet *, u_long, caddr_t);
136 static int vge_init(struct ifnet *);
137 static void vge_stop(struct vge_softc *);
138 static void vge_watchdog(struct ifnet *);
139 #if VGE_POWER_MANAGEMENT
140 static int vge_suspend(struct device *);
141 static int vge_resume(struct device *);
142 #endif
143 static void vge_shutdown(void *);
144 static int vge_ifmedia_upd(struct ifnet *);
145 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
146
147 static uint16_t vge_read_eeprom(struct vge_softc *, int);
148
149 static void vge_miipoll_start(struct vge_softc *);
150 static void vge_miipoll_stop(struct vge_softc *);
151 static int vge_miibus_readreg(struct device *, int, int);
152 static void vge_miibus_writereg(struct device *, int, int, int);
153 static void vge_miibus_statchg(struct device *);
154
155 static void vge_cam_clear(struct vge_softc *);
156 static int vge_cam_set(struct vge_softc *, uint8_t *);
157 static void vge_setmulti(struct vge_softc *);
158 static void vge_reset(struct vge_softc *);
159
160 #define VGE_PCI_LOIO 0x10
161 #define VGE_PCI_LOMEM 0x14
162
163 CFATTACH_DECL(vge, sizeof(struct vge_softc),
164 vge_probe, vge_attach, NULL, NULL);
165
166 /*
167 * Defragment mbuf chain contents to be as linear as possible.
168 * Returns new mbuf chain on success, NULL on failure. Old mbuf
169 * chain is always freed.
170  * XXX temporary until there is a generic function doing this.
171 */
172 #define m_defrag vge_m_defrag
173 struct mbuf * vge_m_defrag(struct mbuf *, int);
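/*
 * Within this driver, vge_encap() falls back to this routine when
 * bus_dmamap_load_mbuf() returns EFBIG for an overly fragmented chain.
 */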
174
175 struct mbuf *
176 vge_m_defrag(struct mbuf *mold, int flags)
177 {
178 struct mbuf *m0, *mn, *n;
179 size_t sz = mold->m_pkthdr.len;
180
181 #ifdef DIAGNOSTIC
182 if ((mold->m_flags & M_PKTHDR) == 0)
183 panic("m_defrag: not a mbuf chain header");
184 #endif
185
186 MGETHDR(m0, flags, MT_DATA);
187 if (m0 == NULL)
188 return NULL;
189 m0->m_pkthdr.len = mold->m_pkthdr.len;
190 mn = m0;
191
192 do {
193 if (sz > MHLEN) {
194 MCLGET(mn, M_DONTWAIT);
195 if ((mn->m_flags & M_EXT) == 0) {
196 m_freem(m0);
197 return NULL;
198 }
199 }
200
201 mn->m_len = MIN(sz, MCLBYTES);
202
203 m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
204 mtod(mn, caddr_t));
205
206 sz -= mn->m_len;
207
208 if (sz > 0) {
209 /* need more mbufs */
210 MGET(n, M_NOWAIT, MT_DATA);
211 if (n == NULL) {
212 m_freem(m0);
213 return NULL;
214 }
215
216 mn->m_next = n;
217 mn = n;
218 }
219 } while (sz > 0);
220
221 return m0;
222 }
223
224 /*
225 * Read a word of data stored in the EEPROM at address 'addr.'
226 */
227 static uint16_t
228 vge_read_eeprom(struct vge_softc *sc, int addr)
229 {
230 int i;
231 uint16_t word = 0;
232
233 /*
234 * Enter EEPROM embedded programming mode. In order to
235 * access the EEPROM at all, we first have to set the
236 * EELOAD bit in the CHIPCFG2 register.
237 */
238 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
239 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
240
241 /* Select the address of the word we want to read */
242 CSR_WRITE_1(sc, VGE_EEADDR, addr);
243
244 /* Issue read command */
245 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
246
247 /* Wait for the done bit to be set. */
248 for (i = 0; i < VGE_TIMEOUT; i++) {
249 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
250 break;
251 }
252
253 if (i == VGE_TIMEOUT) {
254 printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
255 return 0;
256 }
257
258 /* Read the result */
259 word = CSR_READ_2(sc, VGE_EERDDAT);
260
261 /* Turn off EEPROM access mode. */
262 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
263 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
264
265 return word;
266 }
267
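/*
 * Stop the MII autopoll state machine and wait for it to go idle,
 * so that the MII management registers can be accessed directly.
 */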
268 static void
269 vge_miipoll_stop(struct vge_softc *sc)
270 {
271 int i;
272
273 CSR_WRITE_1(sc, VGE_MIICMD, 0);
274
275 for (i = 0; i < VGE_TIMEOUT; i++) {
276 DELAY(1);
277 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
278 break;
279 }
280
281 if (i == VGE_TIMEOUT) {
282 printf("%s: failed to idle MII autopoll\n",
283 sc->sc_dev.dv_xname);
284 }
285 }
286
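/*
 * Restart MII autopolling: idle the state machine first, then enable
 * automatic polling and verify that it actually started.
 */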
287 static void
288 vge_miipoll_start(struct vge_softc *sc)
289 {
290 int i;
291
292 /* First, make sure we're idle. */
293
294 CSR_WRITE_1(sc, VGE_MIICMD, 0);
295 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
296
297 for (i = 0; i < VGE_TIMEOUT; i++) {
298 DELAY(1);
299 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
300 break;
301 }
302
303 if (i == VGE_TIMEOUT) {
304 printf("%s: failed to idle MII autopoll\n",
305 sc->sc_dev.dv_xname);
306 return;
307 }
308
309 /* Now enable auto poll mode. */
310
311 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
312
313 /* And make sure it started. */
314
315 for (i = 0; i < VGE_TIMEOUT; i++) {
316 DELAY(1);
317 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
318 break;
319 }
320
321 if (i == VGE_TIMEOUT) {
322 printf("%s: failed to start MII autopoll\n",
323 sc->sc_dev.dv_xname);
324 }
325 }
326
327 static int
328 vge_miibus_readreg(struct device *dev, int phy, int reg)
329 {
330 struct vge_softc *sc;
331 int i;
332 uint16_t rval;
333
334 sc = (void *)dev;
335 rval = 0;
336 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
337 return 0;
338
339 VGE_LOCK(sc);
340 vge_miipoll_stop(sc);
341
342 /* Specify the register we want to read. */
343 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
344
345 /* Issue read command. */
346 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
347
348 /* Wait for the read command bit to self-clear. */
349 for (i = 0; i < VGE_TIMEOUT; i++) {
350 DELAY(1);
351 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
352 break;
353 }
354
355 if (i == VGE_TIMEOUT)
356 printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
357 else
358 rval = CSR_READ_2(sc, VGE_MIIDATA);
359
360 vge_miipoll_start(sc);
361 VGE_UNLOCK(sc);
362
363 return rval;
364 }
365
366 static void
367 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
368 {
369 struct vge_softc *sc;
370 int i;
371
372 sc = (void *)dev;
373 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
374 return;
375
376 VGE_LOCK(sc);
377 vge_miipoll_stop(sc);
378
379 /* Specify the register we want to write. */
380 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
381
382 /* Specify the data we want to write. */
383 CSR_WRITE_2(sc, VGE_MIIDATA, data);
384
385 /* Issue write command. */
386 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
387
388 /* Wait for the write command bit to self-clear. */
389 for (i = 0; i < VGE_TIMEOUT; i++) {
390 DELAY(1);
391 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
392 break;
393 }
394
395 if (i == VGE_TIMEOUT) {
396 printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
397 }
398
399 vge_miipoll_start(sc);
400 VGE_UNLOCK(sc);
401 }
402
403 static void
404 vge_cam_clear(struct vge_softc *sc)
405 {
406 int i;
407
408 /*
409 * Turn off all the mask bits. This tells the chip
410 * that none of the entries in the CAM filter are valid.
411  * Desired entries will be enabled as we fill the filter in.
412 */
413
414 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
415 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
416 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
417 for (i = 0; i < 8; i++)
418 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
419
420 /* Clear the VLAN filter too. */
421
422 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
423 for (i = 0; i < 8; i++)
424 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
425
426 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
427 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
428 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
429
430 sc->vge_camidx = 0;
431 }
432
433 static int
434 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
435 {
436 int i, error;
437
438 error = 0;
439
440 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
441 return ENOSPC;
442
443 /* Select the CAM data page. */
444 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
445 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
446
447 /* Set the filter entry we want to update and enable writing. */
448 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
449
450 /* Write the address to the CAM registers */
451 for (i = 0; i < ETHER_ADDR_LEN; i++)
452 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
453
454 /* Issue a write command. */
455 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
456
457 	/* Wait for it to clear. */
458 for (i = 0; i < VGE_TIMEOUT; i++) {
459 DELAY(1);
460 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
461 break;
462 }
463
464 if (i == VGE_TIMEOUT) {
465 printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
466 error = EIO;
467 goto fail;
468 }
469
470 /* Select the CAM mask page. */
471 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
472 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
473
474 /* Set the mask bit that enables this filter. */
475 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx / 8),
476 1 << (sc->vge_camidx & 7));
477
478 sc->vge_camidx++;
479
480 fail:
481 /* Turn off access to CAM. */
482 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
483 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
484 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
485
486 return error;
487 }
488
489 /*
490 * Program the multicast filter. We use the 64-entry CAM filter
491  * for perfect filtering. If there are more than 64 multicast addresses,
492  * we use the hash filter instead.
493 */
494 static void
495 vge_setmulti(struct vge_softc *sc)
496 {
497 struct ifnet *ifp;
498 int error;
499 uint32_t h, hashes[2] = { 0, 0 };
500 struct ether_multi *enm;
501 struct ether_multistep step;
502
503 error = 0;
504 ifp = &sc->sc_ethercom.ec_if;
505
506 /* First, zot all the multicast entries. */
507 vge_cam_clear(sc);
508 CSR_WRITE_4(sc, VGE_MAR0, 0);
509 CSR_WRITE_4(sc, VGE_MAR1, 0);
510 ifp->if_flags &= ~IFF_ALLMULTI;
511
512 /*
513 * If the user wants allmulti or promisc mode, enable reception
514 * of all multicast frames.
515 */
516 if (ifp->if_flags & IFF_PROMISC) {
517 allmulti:
518 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
519 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
520 ifp->if_flags |= IFF_ALLMULTI;
521 return;
522 }
523
524 /* Now program new ones */
525 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
526 while (enm != NULL) {
527 /*
528 * If multicast range, fall back to ALLMULTI.
529 */
530 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
531 ETHER_ADDR_LEN) != 0)
532 goto allmulti;
533
534 error = vge_cam_set(sc, enm->enm_addrlo);
535 if (error)
536 break;
537
538 ETHER_NEXT_MULTI(step, enm);
539 }
540
541 /* If there were too many addresses, use the hash filter. */
542 if (error) {
543 vge_cam_clear(sc);
544
545 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
546 while (enm != NULL) {
547 /*
548 * If multicast range, fall back to ALLMULTI.
549 */
550 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
551 ETHER_ADDR_LEN) != 0)
552 goto allmulti;
553
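/*
 * Hash filter: the upper 6 bits of the big-endian CRC of the
 * address select one of the 64 bits spread across the MAR0/MAR1
 * registers.
 */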
554 h = ether_crc32_be(enm->enm_addrlo,
555 ETHER_ADDR_LEN) >> 26;
556 hashes[h >> 5] |= 1 << (h & 0x1f);
557
558 ETHER_NEXT_MULTI(step, enm);
559 }
560
561 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
562 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
563 }
564 }
565
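/*
 * Issue a software reset, then have the chip reload its station
 * address and configuration from the EEPROM.
 */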
566 static void
567 vge_reset(struct vge_softc *sc)
568 {
569 int i;
570
571 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
572
573 for (i = 0; i < VGE_TIMEOUT; i++) {
574 DELAY(5);
575 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
576 break;
577 }
578
579 if (i == VGE_TIMEOUT) {
580 		printf("%s: soft reset timed out\n", sc->sc_dev.dv_xname);
581 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
582 DELAY(2000);
583 }
584
585 DELAY(5000);
586
587 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
588
589 for (i = 0; i < VGE_TIMEOUT; i++) {
590 DELAY(5);
591 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
592 break;
593 }
594
595 if (i == VGE_TIMEOUT) {
596 printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
597 return;
598 }
599
600 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
601 }
602
603 /*
604 * Probe for a VIA gigabit chip. Check the PCI vendor and device
605  * IDs against our list and return nonzero if we find a match.
606 */
607 static int
608 vge_probe(struct device *parent __unused, struct cfdata *match __unused,
609 void *aux)
610 {
611 struct pci_attach_args *pa = aux;
612
613 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
614 && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
615 return 1;
616
617 return 0;
618 }
619
620 static int
621 vge_allocmem(struct vge_softc *sc)
622 {
623 int error;
624 int nseg;
625 int i;
626 bus_dma_segment_t seg;
627
628 /*
629 * Allocate map for TX descriptor list.
630 */
631 error = bus_dmamap_create(sc->vge_dmat,
632 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT,
633 &sc->vge_ldata.vge_tx_list_map);
634 if (error) {
635 printf("%s: could not allocate TX dma list map\n",
636 sc->sc_dev.dv_xname);
637 return ENOMEM;
638 }
639
640 /*
641 * Allocate memory for TX descriptor list.
642 */
643
644 error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
645 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
646 if (error) {
647 printf("%s: could not allocate TX ring dma memory\n",
648 sc->sc_dev.dv_xname);
649 return ENOMEM;
650 }
651
652 /* Map the memory to kernel VA space */
653
654 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_TX_LIST_SZ,
655 (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
656 if (error) {
657 printf("%s: could not map TX ring dma memory\n",
658 sc->sc_dev.dv_xname);
659 return ENOMEM;
660 }
661
662 /* Load the map for the TX ring. */
663 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
664 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
665 if (error) {
666 printf("%s: could not load TX ring dma memory\n",
667 sc->sc_dev.dv_xname);
668 return ENOMEM;
669 }
670
671 /* Create DMA maps for TX buffers */
672
673 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
674 error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
675 VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
676 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
677 &sc->vge_ldata.vge_tx_dmamap[i]);
678 if (error) {
679 printf("%s: can't create DMA map for TX\n",
680 sc->sc_dev.dv_xname);
681 return ENOMEM;
682 }
683 }
684
685 /*
686 * Allocate map for RX descriptor list.
687 */
688 error = bus_dmamap_create(sc->vge_dmat,
689 VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT,
690 &sc->vge_ldata.vge_rx_list_map);
691 if (error) {
692 printf("%s: could not allocate RX dma list map\n",
693 sc->sc_dev.dv_xname);
694 return ENOMEM;
695 }
696
697 /* Allocate DMA'able memory for the RX ring */
698
699 error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
700 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
701 if (error)
702 return ENOMEM;
703
704 /* Map the memory to kernel VA space */
705
706 error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_RX_LIST_SZ,
707 (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
708 if (error)
709 return ENOMEM;
710
711 /* Load the map for the RX ring. */
712 error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
713 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
714 if (error) {
715 printf("%s: could not load RX ring dma memory\n",
716 sc->sc_dev.dv_xname);
717 return ENOMEM;
718 }
719
720 /* Create DMA maps for RX buffers */
721
722 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
723 error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
724 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
725 &sc->vge_ldata.vge_rx_dmamap[i]);
726 if (error) {
727 printf("%s: can't create DMA map for RX\n",
728 sc->sc_dev.dv_xname);
729 return ENOMEM;
730 }
731 }
732
733 return 0;
734 }
735
736 /*
737 * Attach the interface. Allocate softc structures, do ifmedia
738 * setup and ethernet/BPF attach.
739 */
740 static void
741 vge_attach(struct device *parent __unused, struct device *self, void *aux)
742 {
743 uint8_t *eaddr;
744 struct vge_softc *sc = (struct vge_softc *)self;
745 struct ifnet *ifp;
746 struct pci_attach_args *pa = aux;
747 pci_chipset_tag_t pc = pa->pa_pc;
748 const char *intrstr;
749 pci_intr_handle_t ih;
750 uint16_t val;
751
752 aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
753 PCI_REVISION(pa->pa_class));
754
755 /* Make sure bus-mastering is enabled */
756 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
757 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
758 PCI_COMMAND_MASTER_ENABLE);
759
760 /*
761 * Map control/status registers.
762 */
763 if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
764 &sc->vge_btag, &sc->vge_bhandle, NULL, NULL) != 0) {
765 aprint_error("%s: couldn't map memory\n", sc->sc_dev.dv_xname);
766 return;
767 }
768
769 /*
770 * Map and establish our interrupt.
771 */
772 if (pci_intr_map(pa, &ih)) {
773 aprint_error("%s: unable to map interrupt\n",
774 sc->sc_dev.dv_xname);
775 return;
776 }
777 intrstr = pci_intr_string(pc, ih);
778 sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
779 if (sc->vge_intrhand == NULL) {
780 printf("%s: unable to establish interrupt",
781 sc->sc_dev.dv_xname);
782 if (intrstr != NULL)
783 printf(" at %s", intrstr);
784 printf("\n");
785 return;
786 }
787 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
788
789 /* Reset the adapter. */
790 vge_reset(sc);
791
792 /*
793 * Get station address from the EEPROM.
794 */
795 eaddr = sc->vge_eaddr;
796 val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
797 eaddr[0] = val & 0xff;
798 eaddr[1] = val >> 8;
799 val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
800 eaddr[2] = val & 0xff;
801 eaddr[3] = val >> 8;
802 val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
803 eaddr[4] = val & 0xff;
804 eaddr[5] = val >> 8;
805
806 printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
807 ether_sprintf(eaddr));
808
809 /*
810 * Use the 32bit tag. Hardware supports 48bit physical addresses,
811 * but we don't use that for now.
812 */
813 sc->vge_dmat = pa->pa_dmat;
814
815 if (vge_allocmem(sc))
816 return;
817
818 ifp = &sc->sc_ethercom.ec_if;
819 ifp->if_softc = sc;
820 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
821 ifp->if_mtu = ETHERMTU;
822 ifp->if_baudrate = IF_Gbps(1);
823 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
824 ifp->if_ioctl = vge_ioctl;
825 ifp->if_start = vge_start;
826
827 /*
828 * We can support 802.1Q VLAN-sized frames and jumbo
829 * Ethernet frames.
830 */
831 sc->sc_ethercom.ec_capabilities |=
832 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
833 ETHERCAP_VLAN_HWTAGGING;
834
835 /*
836 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
837 */
838 ifp->if_capabilities |=
839 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
840 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
841 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
842
843 #ifdef DEVICE_POLLING
844 #ifdef IFCAP_POLLING
845 ifp->if_capabilities |= IFCAP_POLLING;
846 #endif
847 #endif
848 ifp->if_watchdog = vge_watchdog;
849 ifp->if_init = vge_init;
850 IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
851
852 /*
853 * Initialize our media structures and probe the MII.
854 */
855 sc->sc_mii.mii_ifp = ifp;
856 sc->sc_mii.mii_readreg = vge_miibus_readreg;
857 sc->sc_mii.mii_writereg = vge_miibus_writereg;
858 sc->sc_mii.mii_statchg = vge_miibus_statchg;
859 ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
860 vge_ifmedia_sts);
861 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
862 MII_OFFSET_ANY, MIIF_DOPAUSE);
863 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
864 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
865 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
866 } else
867 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
868
869 /*
870 * Attach the interface.
871 */
872 if_attach(ifp);
873 ether_ifattach(ifp, eaddr);
874
875 callout_init(&sc->vge_timeout);
876 callout_setfunc(&sc->vge_timeout, vge_tick, sc);
877
878 /*
879 * Make sure the interface is shutdown during reboot.
880 */
881 if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
882 printf("%s: WARNING: unable to establish shutdown hook\n",
883 sc->sc_dev.dv_xname);
884 }
885 }
886
887 static int
888 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
889 {
890 struct vge_rx_desc *d;
891 struct mbuf *m_new;
892 bus_dmamap_t map;
893 int i;
894
895 m_new = NULL;
896 if (m == NULL) {
897 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
898 if (m_new == NULL)
899 return ENOBUFS;
900
901 MCLGET(m_new, M_DONTWAIT);
902 if ((m_new->m_flags & M_EXT) == 0) {
903 m_freem(m_new);
904 return ENOBUFS;
905 }
906
907 m = m_new;
908 } else
909 m->m_data = m->m_ext.ext_buf;
910
911
912 #ifndef __NO_STRICT_ALIGNMENT
913 /*
914 * This is part of an evil trick to deal with non-x86 platforms.
915 * The VIA chip requires RX buffers to be aligned on 32-bit
916 * boundaries, but that will hose non-x86 machines. To get around
917 * this, we leave some empty space at the start of each buffer
918 * and for non-x86 hosts, we copy the buffer back two bytes
919 * to achieve word alignment. This is slightly more efficient
920 * than allocating a new buffer, copying the contents, and
921 * discarding the old buffer.
922 */
923 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
924 m_adj(m, VGE_ETHER_ALIGN);
925 #else
926 m->m_len = m->m_pkthdr.len = MCLBYTES;
927 #endif
928 map = sc->vge_ldata.vge_rx_dmamap[idx];
929
930 if (bus_dmamap_load_mbuf(sc->vge_dmat, map, m, BUS_DMA_NOWAIT) != 0)
931 goto out;
932
933 d = &sc->vge_ldata.vge_rx_list[idx];
934
935 /* If this descriptor is still owned by the chip, bail. */
936
937 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
938 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
939 printf("%s: tried to map busy descriptor\n",
940 sc->sc_dev.dv_xname);
941 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
942 goto out;
943 }
944
945 d->vge_buflen =
946 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
947 d->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
948 d->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
949 d->vge_sts = 0;
950 d->vge_ctl = 0;
951 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
952
953 bus_dmamap_sync(sc->vge_dmat,
954 sc->vge_ldata.vge_rx_dmamap[idx],
955 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
956 BUS_DMASYNC_PREREAD);
957
958 /*
959 * Note: the manual fails to document the fact that for
960  * proper operation, the driver needs to replenish the RX
961 * DMA ring 4 descriptors at a time (rather than one at a
962 * time, like most chips). We can allocate the new buffers
963 * but we should not set the OWN bits until we're ready
964 * to hand back 4 of them in one shot.
965 */
966
967 #define VGE_RXCHUNK 4
968 sc->vge_rx_consumed++;
969 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
970 for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
971 sc->vge_ldata.vge_rx_list[i].vge_sts |=
972 htole32(VGE_RDSTS_OWN);
973 VGE_RXDESCSYNC(sc, i,
974 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
975 }
976 sc->vge_rx_consumed = 0;
977 }
978
979 sc->vge_ldata.vge_rx_mbuf[idx] = m;
980
981 return 0;
982 out:
983 if (m_new != NULL)
984 m_freem(m_new);
985 return ENOMEM;
986 }
987
988 static int
989 vge_tx_list_init(struct vge_softc *sc)
990 {
991
992 memset((char *)sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
993 bus_dmamap_sync(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
994 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
995 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
996
997 memset((char *)&sc->vge_ldata.vge_tx_mbuf, 0,
998 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
999
1000 sc->vge_ldata.vge_tx_prodidx = 0;
1001 sc->vge_ldata.vge_tx_considx = 0;
1002 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1003
1004 return 0;
1005 }
1006
1007 static int
1008 vge_rx_list_init(struct vge_softc *sc)
1009 {
1010 int i;
1011
1012 memset((char *)sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
1013 memset((char *)&sc->vge_ldata.vge_rx_mbuf, 0,
1014 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1015
1016 sc->vge_rx_consumed = 0;
1017
1018 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1019 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1020 return (ENOBUFS);
1021 }
1022
1023 sc->vge_ldata.vge_rx_prodidx = 0;
1024 sc->vge_rx_consumed = 0;
1025 sc->vge_head = sc->vge_tail = NULL;
1026
1027 return 0;
1028 }
1029
1030 #ifndef __NO_STRICT_ALIGNMENT
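/*
 * Copy the received frame back two bytes so that the IP header
 * following the 14-byte Ethernet header ends up word aligned on
 * strict-alignment hosts; see the comment in vge_newbuf().
 */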
1031 static inline void
1032 vge_fixup_rx(struct mbuf *m)
1033 {
1034 int i;
1035 uint16_t *src, *dst;
1036
1037 src = mtod(m, uint16_t *);
1038 dst = src - 1;
1039
1040 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1041 *dst++ = *src++;
1042
1043 m->m_data -= ETHER_ALIGN;
1044 }
1045 #endif
1046
1047 /*
1048 * RX handler. We support the reception of jumbo frames that have
1049 * been fragmented across multiple 2K mbuf cluster buffers.
1050 */
1051 static void
1052 vge_rxeof(struct vge_softc *sc)
1053 {
1054 struct mbuf *m;
1055 struct ifnet *ifp;
1056 int idx, total_len, lim;
1057 struct vge_rx_desc *cur_rx;
1058 uint32_t rxstat, rxctl;
1059
1060 VGE_LOCK_ASSERT(sc);
1061 ifp = &sc->sc_ethercom.ec_if;
1062 idx = sc->vge_ldata.vge_rx_prodidx;
1063 lim = 0;
1064
1065 /* Invalidate the descriptor memory */
1066
1067 for (;;) {
1068 cur_rx = &sc->vge_ldata.vge_rx_list[idx];
1069
1070 VGE_RXDESCSYNC(sc, idx,
1071 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1072 rxstat = le32toh(cur_rx->vge_sts);
1073 if ((rxstat & VGE_RDSTS_OWN) != 0) {
1074 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1075 break;
1076 }
1077
1078 #ifdef DEVICE_POLLING
1079 if (ifp->if_flags & IFF_POLLING) {
1080 if (sc->rxcycles <= 0)
1081 break;
1082 sc->rxcycles--;
1083 }
1084 #endif /* DEVICE_POLLING */
1085
1086 m = sc->vge_ldata.vge_rx_mbuf[idx];
1087 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1088 rxctl = le32toh(cur_rx->vge_ctl);
1089
1090 /* Invalidate the RX mbuf and unload its map */
1091
1092 bus_dmamap_sync(sc->vge_dmat,
1093 sc->vge_ldata.vge_rx_dmamap[idx],
1094 0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
1095 BUS_DMASYNC_POSTREAD);
1096 bus_dmamap_unload(sc->vge_dmat,
1097 sc->vge_ldata.vge_rx_dmamap[idx]);
1098
1099 /*
1100 * If the 'start of frame' bit is set, this indicates
1101 * either the first fragment in a multi-fragment receive,
1102 * or an intermediate fragment. Either way, we want to
1103 * accumulate the buffers.
1104 */
1105 if (rxstat & VGE_RXPKT_SOF) {
1106 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1107 if (sc->vge_head == NULL)
1108 sc->vge_head = sc->vge_tail = m;
1109 else {
1110 m->m_flags &= ~M_PKTHDR;
1111 sc->vge_tail->m_next = m;
1112 sc->vge_tail = m;
1113 }
1114 vge_newbuf(sc, idx, NULL);
1115 VGE_RX_DESC_INC(idx);
1116 continue;
1117 }
1118
1119 /*
1120 * Bad/error frames will have the RXOK bit cleared.
1121 * However, there's one error case we want to allow:
1122 * if a VLAN tagged frame arrives and the chip can't
1123 * match it against the CAM filter, it considers this
1124 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1125 * We don't want to drop the frame though: our VLAN
1126 * filtering is done in software.
1127 */
1128 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1129 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1130 ifp->if_ierrors++;
1131 /*
1132 * If this is part of a multi-fragment packet,
1133 * discard all the pieces.
1134 */
1135 if (sc->vge_head != NULL) {
1136 m_freem(sc->vge_head);
1137 sc->vge_head = sc->vge_tail = NULL;
1138 }
1139 vge_newbuf(sc, idx, m);
1140 VGE_RX_DESC_INC(idx);
1141 continue;
1142 }
1143
1144 /*
1145 * If allocating a replacement mbuf fails,
1146 * reload the current one.
1147 */
1148
1149 if (vge_newbuf(sc, idx, NULL)) {
1150 ifp->if_ierrors++;
1151 if (sc->vge_head != NULL) {
1152 m_freem(sc->vge_head);
1153 sc->vge_head = sc->vge_tail = NULL;
1154 }
1155 vge_newbuf(sc, idx, m);
1156 VGE_RX_DESC_INC(idx);
1157 continue;
1158 }
1159
1160 VGE_RX_DESC_INC(idx);
1161
1162 if (sc->vge_head != NULL) {
1163 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1164 /*
1165 			 * Special case: if there are 4 bytes or fewer
1166 			 * in this buffer, the mbuf can be discarded:
1167 			 * the last 4 bytes are the CRC, which we don't
1168 * care about anyway.
1169 */
1170 if (m->m_len <= ETHER_CRC_LEN) {
1171 sc->vge_tail->m_len -=
1172 (ETHER_CRC_LEN - m->m_len);
1173 m_freem(m);
1174 } else {
1175 m->m_len -= ETHER_CRC_LEN;
1176 m->m_flags &= ~M_PKTHDR;
1177 sc->vge_tail->m_next = m;
1178 }
1179 m = sc->vge_head;
1180 sc->vge_head = sc->vge_tail = NULL;
1181 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1182 } else
1183 m->m_pkthdr.len = m->m_len =
1184 (total_len - ETHER_CRC_LEN);
1185
1186 #ifndef __NO_STRICT_ALIGNMENT
1187 vge_fixup_rx(m);
1188 #endif
1189 ifp->if_ipackets++;
1190 m->m_pkthdr.rcvif = ifp;
1191
1192 /* Do RX checksumming if enabled */
1193 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1194
1195 /* Check IP header checksum */
1196 if (rxctl & VGE_RDCTL_IPPKT)
1197 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1198 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1199 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1200 }
1201
1202 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1203 			/* Check TCP checksum */
1204 if (rxctl & VGE_RDCTL_TCPPKT)
1205 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1206
1207 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1208 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1209 }
1210
1211 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1212 /* Check UDP checksum */
1213 if (rxctl & VGE_RDCTL_UDPPKT)
1214 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1215
1216 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1217 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1218 }
1219
1220 if (rxstat & VGE_RDSTS_VTAG)
1221 VLAN_INPUT_TAG(ifp, m,
1222 ntohs((rxctl & VGE_RDCTL_VLANID)), continue);
1223
1224 #if NBPFILTER > 0
1225 /*
1226 * Handle BPF listeners.
1227 */
1228 if (ifp->if_bpf)
1229 bpf_mtap(ifp->if_bpf, m);
1230 #endif
1231
1232 VGE_UNLOCK(sc);
1233 (*ifp->if_input)(ifp, m);
1234 VGE_LOCK(sc);
1235
1236 lim++;
1237 if (lim == VGE_RX_DESC_CNT)
1238 break;
1239
1240 }
1241
1242 sc->vge_ldata.vge_rx_prodidx = idx;
1243 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1244 }
1245
1246 static void
1247 vge_txeof(struct vge_softc *sc)
1248 {
1249 struct ifnet *ifp;
1250 uint32_t txstat;
1251 int idx;
1252
1253 ifp = &sc->sc_ethercom.ec_if;
1254 idx = sc->vge_ldata.vge_tx_considx;
1255
1256 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1257 VGE_TXDESCSYNC(sc, idx,
1258 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1259
1260 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1261 if (txstat & VGE_TDSTS_OWN) {
1262 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1263 break;
1264 }
1265
1266 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1267 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1268 bus_dmamap_unload(sc->vge_dmat,
1269 sc->vge_ldata.vge_tx_dmamap[idx]);
1270 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1271 ifp->if_collisions++;
1272 if (txstat & VGE_TDSTS_TXERR)
1273 ifp->if_oerrors++;
1274 else
1275 ifp->if_opackets++;
1276
1277 sc->vge_ldata.vge_tx_free++;
1278 VGE_TX_DESC_INC(idx);
1279 }
1280
1281 /* No changes made to the TX ring, so no flush needed */
1282
1283 if (idx != sc->vge_ldata.vge_tx_considx) {
1284 sc->vge_ldata.vge_tx_considx = idx;
1285 ifp->if_flags &= ~IFF_OACTIVE;
1286 ifp->if_timer = 0;
1287 }
1288
1289 /*
1290 	 * If not all descriptors have been reaped yet,
1291 * reload the timer so that we will eventually get another
1292 * interrupt that will cause us to re-enter this routine.
1293 * This is done in case the transmitter has gone idle.
1294 */
1295 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1296 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1297 }
1298 }
1299
1300 static void
1301 vge_tick(void *xsc)
1302 {
1303 struct vge_softc *sc;
1304 struct ifnet *ifp;
1305 struct mii_data *mii;
1306 int s;
1307
1308 sc = xsc;
1309 ifp = &sc->sc_ethercom.ec_if;
1310 mii = &sc->sc_mii;
1311
1312 s = splnet();
1313
1314 VGE_LOCK(sc);
1315
1316 callout_schedule(&sc->vge_timeout, hz);
1317
1318 mii_tick(mii);
1319 if (sc->vge_link) {
1320 if (!(mii->mii_media_status & IFM_ACTIVE))
1321 sc->vge_link = 0;
1322 } else {
1323 if (mii->mii_media_status & IFM_ACTIVE &&
1324 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1325 sc->vge_link = 1;
1326 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1327 vge_start(ifp);
1328 }
1329 }
1330
1331 VGE_UNLOCK(sc);
1332
1333 splx(s);
1334 }
1335
1336 #ifdef DEVICE_POLLING
1337 static void
1338 vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1339 {
1340 struct vge_softc *sc = ifp->if_softc;
1341
1342 VGE_LOCK(sc);
1343 #ifdef IFCAP_POLLING
1344 if (!(ifp->if_capenable & IFCAP_POLLING)) {
1345 ether_poll_deregister(ifp);
1346 cmd = POLL_DEREGISTER;
1347 }
1348 #endif
1349 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1350 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1351 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
1352 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1353 goto done;
1354 }
1355
1356 sc->rxcycles = count;
1357 vge_rxeof(sc);
1358 vge_txeof(sc);
1359
1360 #if __FreeBSD_version < 502114
1361 if (ifp->if_snd.ifq_head != NULL)
1362 #else
1363 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1364 #endif
1365 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1366
1367 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1368 uint32_t status;
1369 status = CSR_READ_4(sc, VGE_ISR);
1370 if (status == 0xFFFFFFFF)
1371 goto done;
1372 if (status)
1373 CSR_WRITE_4(sc, VGE_ISR, status);
1374
1375 /*
1376 * XXX check behaviour on receiver stalls.
1377 */
1378
1379 if (status & VGE_ISR_TXDMA_STALL ||
1380 status & VGE_ISR_RXDMA_STALL)
1381 			vge_init(ifp);
1382
1383 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1384 vge_rxeof(sc);
1385 ifp->if_ierrors++;
1386 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1387 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1388 }
1389 }
1390 done:
1391 VGE_UNLOCK(sc);
1392 }
1393 #endif /* DEVICE_POLLING */
1394
1395 static int
1396 vge_intr(void *arg)
1397 {
1398 struct vge_softc *sc;
1399 struct ifnet *ifp;
1400 uint32_t status;
1401 int claim;
1402
1403 sc = arg;
1404 claim = 0;
1405 if (sc->suspended) {
1406 return claim;
1407 }
1408
1409 ifp = &sc->sc_ethercom.ec_if;
1410
1411 VGE_LOCK(sc);
1412
1413 if (!(ifp->if_flags & IFF_UP)) {
1414 VGE_UNLOCK(sc);
1415 return claim;
1416 }
1417
1418 #ifdef DEVICE_POLLING
1419 if (ifp->if_flags & IFF_POLLING)
1420 goto done;
1421 if (
1422 #ifdef IFCAP_POLLING
1423 (ifp->if_capenable & IFCAP_POLLING) &&
1424 #endif
1425 ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
1426 CSR_WRITE_4(sc, VGE_IMR, 0);
1427 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1428 vge_poll(ifp, 0, 1);
1429 goto done;
1430 }
1431
1432 #endif /* DEVICE_POLLING */
1433
1434 /* Disable interrupts */
1435 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1436
1437 for (;;) {
1438
1439 status = CSR_READ_4(sc, VGE_ISR);
1440 		/* If the card has gone away the read returns 0xffffffff. */
1441 if (status == 0xFFFFFFFF)
1442 break;
1443
1444 if (status) {
1445 claim = 1;
1446 CSR_WRITE_4(sc, VGE_ISR, status);
1447 }
1448
1449 if ((status & VGE_INTRS) == 0)
1450 break;
1451
1452 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1453 vge_rxeof(sc);
1454
1455 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1456 vge_rxeof(sc);
1457 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1458 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1459 }
1460
1461 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1462 vge_txeof(sc);
1463
1464 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1465 vge_init(ifp);
1466
1467 if (status & VGE_ISR_LINKSTS)
1468 vge_tick(sc);
1469 }
1470
1471 /* Re-enable interrupts */
1472 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1473
1474 #ifdef DEVICE_POLLING
1475 done:
1476 #endif
1477 VGE_UNLOCK(sc);
1478
1479 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1480 vge_start(ifp);
1481
1482 return claim;
1483 }
1484
1485 static int
1486 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1487 {
1488 struct vge_tx_desc *d;
1489 struct vge_tx_frag *f;
1490 struct mbuf *m_new;
1491 bus_dmamap_t map;
1492 int seg, error, flags;
1493 struct m_tag *mtag;
1494 size_t sz;
1495
1496 d = &sc->vge_ldata.vge_tx_list[idx];
1497
1498 /* If this descriptor is still owned by the chip, bail. */
1499 if (sc->vge_ldata.vge_tx_free <= 2) {
1500 VGE_TXDESCSYNC(sc, idx,
1501 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1502 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
1503 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1504 return ENOBUFS;
1505 }
1506 }
1507
1508 map = sc->vge_ldata.vge_tx_dmamap[idx];
1509 error = bus_dmamap_load_mbuf(sc->vge_dmat, map, m_head, BUS_DMA_NOWAIT);
1510
1511 /* If too many segments to map, coalesce */
1512 if (error == EFBIG) {
1513 m_new = m_defrag(m_head, M_DONTWAIT);
1514 if (m_new == NULL)
1515 return (error);
1516
1517 error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
1518 m_new, BUS_DMA_NOWAIT);
1519 if (error) {
1520 m_freem(m_new);
1521 return error;
1522 }
1523
1524 m_head = m_new;
1525 } else if (error)
1526 return error;
1527
1528 for (seg = 0, f = &d->vge_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1529 f->vge_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1530 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[seg].ds_addr));
1531 f->vge_addrhi = htole16(VGE_ADDR_HI(map->dm_segs[seg].ds_addr));
1532 }
1533
1534 /* Argh. This chip does not autopad short frames */
1535
1536 sz = m_head->m_pkthdr.len;
1537 if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1538 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
1539 f->vge_addrlo = htole32(VGE_ADDR_LO(map->dm_segs[0].ds_addr));
1540 f->vge_addrhi =
1541 htole16(VGE_ADDR_HI(map->dm_segs[0].ds_addr) & 0xFFFF);
1542 sz = VGE_MIN_FRAMELEN;
1543 seg++;
1544 }
1545 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1546
1547 /*
1548 * When telling the chip how many segments there are, we
1549 * must use nsegs + 1 instead of just nsegs. Darned if I
1550 * know why.
1551 */
1552 seg++;
1553
1554 flags = 0;
1555 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
1556 flags |= VGE_TDCTL_IPCSUM;
1557 if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1558 flags |= VGE_TDCTL_TCPCSUM;
1559 if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1560 flags |= VGE_TDCTL_UDPCSUM;
1561 d->vge_sts = htole32(sz << 16);
1562 d->vge_ctl = htole32(flags | (seg << 28) | VGE_TD_LS_NORM);
1563
1564 if (sz > ETHERMTU + ETHER_HDR_LEN)
1565 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
1566
1567 bus_dmamap_sync(sc->vge_dmat, map, 0, map->dm_mapsize,
1568 BUS_DMASYNC_PREWRITE);
1569
1570 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1571 sc->vge_ldata.vge_tx_free--;
1572
1573 /*
1574 * Set up hardware VLAN tagging.
1575 */
1576
1577 mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
1578 if (mtag != NULL)
1579 d->vge_ctl |=
1580 htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);
1581
1582 d->vge_sts |= htole32(VGE_TDSTS_OWN);
1583
1584 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1585
1586 return 0;
1587 }
1588
1589 /*
1590 * Main transmit routine.
1591 */
1592
1593 static void
1594 vge_start(struct ifnet *ifp)
1595 {
1596 struct vge_softc *sc;
1597 struct mbuf *m_head;
1598 int idx, pidx, error;
1599
1600 sc = ifp->if_softc;
1601 VGE_LOCK(sc);
1602
1603 if (!sc->vge_link ||
1604 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1605 VGE_UNLOCK(sc);
1606 return;
1607 }
1608
1609 m_head = NULL;
1610 idx = sc->vge_ldata.vge_tx_prodidx;
1611
1612 pidx = idx - 1;
1613 if (pidx < 0)
1614 pidx = VGE_TX_DESC_CNT - 1;
1615
1616 /*
1617 * Loop through the send queue, setting up transmit descriptors
1618 * until we drain the queue, or use up all available transmit
1619 * descriptors.
1620 */
1621 for (;;) {
1622 /* Grab a packet off the queue. */
1623 IFQ_POLL(&ifp->if_snd, m_head);
1624 if (m_head == NULL)
1625 break;
1626
1627 if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
1628 /*
1629 * Slot already used, stop for now.
1630 */
1631 ifp->if_flags |= IFF_OACTIVE;
1632 break;
1633 }
1634
1635 if ((error = vge_encap(sc, m_head, idx))) {
1636 if (error == EFBIG) {
1637 printf("%s: Tx packet consumes too many "
1638 "DMA segments, dropping...\n",
1639 sc->sc_dev.dv_xname);
1640 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1641 m_freem(m_head);
1642 continue;
1643 }
1644
1645 /*
1646 * Short on resources, just stop for now.
1647 */
1648 if (error == ENOBUFS)
1649 ifp->if_flags |= IFF_OACTIVE;
1650 break;
1651 }
1652
1653 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1654
1655 /*
1656 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1657 */
1658
1659 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1660 htole16(VGE_TXDESC_Q);
1661 VGE_TXDESCSYNC(sc, pidx,
1662 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1663
1664 if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
1665 m_freem(m_head);
1666 m_head = sc->vge_ldata.vge_tx_mbuf[idx];
1667 }
1668
1669 pidx = idx;
1670 VGE_TX_DESC_INC(idx);
1671
1672 /*
1673 * If there's a BPF listener, bounce a copy of this frame
1674 * to him.
1675 */
1676 #if NBPFILTER > 0
1677 if (ifp->if_bpf)
1678 bpf_mtap(ifp->if_bpf, m_head);
1679 #endif
1680 }
1681
1682 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1683 VGE_UNLOCK(sc);
1684 return;
1685 }
1686
1687 /* Issue a transmit command. */
1688 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1689
1690 sc->vge_ldata.vge_tx_prodidx = idx;
1691
1692 /*
1693 * Use the countdown timer for interrupt moderation.
1694 * 'TX done' interrupts are disabled. Instead, we reset the
1695 * countdown timer, which will begin counting until it hits
1696 * the value in the SSTIMER register, and then trigger an
1697 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1698 	 * timer count is reloaded. Only when the transmitter
1699 * is idle will the timer hit 0 and an interrupt fire.
1700 */
1701 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1702
1703 VGE_UNLOCK(sc);
1704
1705 /*
1706 * Set a timeout in case the chip goes out to lunch.
1707 */
1708 ifp->if_timer = 5;
1709 }
1710
1711 static int
1712 vge_init(struct ifnet *ifp)
1713 {
1714 struct vge_softc *sc;
1715 int i;
1716
1717 sc = ifp->if_softc;
1718
1719 VGE_LOCK(sc);
1720
1721 /*
1722 * Cancel pending I/O and free all RX/TX buffers.
1723 */
1724 vge_stop(sc);
1725 vge_reset(sc);
1726
1727 /*
1728 * Initialize the RX and TX descriptors and mbufs.
1729 */
1730
1731 vge_rx_list_init(sc);
1732 vge_tx_list_init(sc);
1733
1734 /* Set our station address */
1735 for (i = 0; i < ETHER_ADDR_LEN; i++)
1736 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);
1737
1738 /*
1739 * Set receive FIFO threshold. Also allow transmission and
1740 * reception of VLAN tagged frames.
1741 */
1742 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1743 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1744
1745 /* Set DMA burst length */
1746 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1747 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1748
1749 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1750
1751 /* Set collision backoff algorithm */
1752 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1753 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1754 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1755
1756 /* Disable LPSEL field in priority resolution */
1757 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1758
1759 /*
1760 * Load the addresses of the DMA queues into the chip.
1761 * Note that we only use one transmit queue.
1762 */
1763
1764 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1765 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr));
1766 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1767
1768 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1769 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr));
1770 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1771 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1772
1773 /* Enable and wake up the RX descriptor queue */
1774 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1775 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1776
1777 /* Enable the TX descriptor queue */
1778 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1779
1780 /* Set up the receive filter -- allow large frames for VLANs. */
1781 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1782
1783 /* If we want promiscuous mode, set the allframes bit. */
1784 if (ifp->if_flags & IFF_PROMISC) {
1785 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1786 }
1787
1788 /* Set capture broadcast bit to capture broadcast frames. */
1789 if (ifp->if_flags & IFF_BROADCAST) {
1790 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1791 }
1792
1793 /* Set multicast bit to capture multicast frames. */
1794 if (ifp->if_flags & IFF_MULTICAST) {
1795 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1796 }
1797
1798 /* Init the cam filter. */
1799 vge_cam_clear(sc);
1800
1801 /* Init the multicast filter. */
1802 vge_setmulti(sc);
1803
1804 /* Enable flow control */
1805
1806 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1807
1808 /* Enable jumbo frame reception (if desired) */
1809
1810 /* Start the MAC. */
1811 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1812 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1813 CSR_WRITE_1(sc, VGE_CRS0,
1814 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1815
1816 /*
1817 * Configure one-shot timer for microsecond
1818 	 * resolution and load it for 500 usecs.
1819 */
1820 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1821 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1822
1823 /*
1824 * Configure interrupt moderation for receive. Enable
1825 * the holdoff counter and load it, and set the RX
1826 * suppression count to the number of descriptors we
1827 * want to allow before triggering an interrupt.
1828 * The holdoff timer is in units of 20 usecs.
1829 */
1830
1831 #ifdef notyet
1832 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1833 /* Select the interrupt holdoff timer page. */
1834 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1835 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1836 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1837
1838 /* Enable use of the holdoff timer. */
1839 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1840 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1841
1842 /* Select the RX suppression threshold page. */
1843 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1844 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1845 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1846
1847 /* Restore the page select bits. */
1848 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1849 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1850 #endif
1851
1852 #ifdef DEVICE_POLLING
1853 /*
1854 * Disable interrupts if we are polling.
1855 */
1856 if (ifp->if_flags & IFF_POLLING) {
1857 CSR_WRITE_4(sc, VGE_IMR, 0);
1858 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1859 } else /* otherwise ... */
1860 #endif /* DEVICE_POLLING */
1861 {
1862 /*
1863 * Enable interrupts.
1864 */
1865 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1866 CSR_WRITE_4(sc, VGE_ISR, 0);
1867 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1868 }
1869
1870 mii_mediachg(&sc->sc_mii);
1871
1872 ifp->if_flags |= IFF_RUNNING;
1873 ifp->if_flags &= ~IFF_OACTIVE;
1874
1875 sc->vge_if_flags = 0;
1876 sc->vge_link = 0;
1877
1878 VGE_UNLOCK(sc);
1879
1880 callout_schedule(&sc->vge_timeout, hz);
1881
1882 return 0;
1883 }
1884
1885 /*
1886 * Set media options.
1887 */
1888 static int
1889 vge_ifmedia_upd(struct ifnet *ifp)
1890 {
1891 struct vge_softc *sc;
1892
1893 sc = ifp->if_softc;
1894 mii_mediachg(&sc->sc_mii);
1895
1896 return 0;
1897 }
1898
1899 /*
1900 * Report current media status.
1901 */
1902 static void
1903 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1904 {
1905 struct vge_softc *sc;
1906 struct mii_data *mii;
1907
1908 sc = ifp->if_softc;
1909 mii = &sc->sc_mii;
1910
1911 mii_pollstat(mii);
1912 ifmr->ifm_active = mii->mii_media_active;
1913 ifmr->ifm_status = mii->mii_media_status;
1914 }
1915
1916 static void
1917 vge_miibus_statchg(struct device *self)
1918 {
1919 struct vge_softc *sc;
1920 struct mii_data *mii;
1921 struct ifmedia_entry *ife;
1922
1923 sc = (void *)self;
1924 mii = &sc->sc_mii;
1925 ife = mii->mii_media.ifm_cur;
1926 /*
1927 * If the user manually selects a media mode, we need to turn
1928 * on the forced MAC mode bit in the DIAGCTL register. If the
1929 * user happens to choose a full duplex mode, we also need to
1930 * set the 'force full duplex' bit. This applies only to
1931 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1932 * mode is disabled, and in 1000baseT mode, full duplex is
1933 * always implied, so we turn on the forced mode bit but leave
1934 * the FDX bit cleared.
1935 */
1936
1937 switch (IFM_SUBTYPE(ife->ifm_media)) {
1938 case IFM_AUTO:
1939 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1940 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1941 break;
1942 case IFM_1000_T:
1943 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1944 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1945 break;
1946 case IFM_100_TX:
1947 case IFM_10_T:
1948 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1949 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
1950 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1951 } else {
1952 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1953 }
1954 break;
1955 default:
1956 printf("%s: unknown media type: %x\n",
1957 sc->sc_dev.dv_xname,
1958 IFM_SUBTYPE(ife->ifm_media));
1959 break;
1960 }
1961 }
1962
1963 static int
1964 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1965 {
1966 struct vge_softc *sc;
1967 struct ifreq *ifr;
1968 struct mii_data *mii;
1969 int s, error;
1970
1971 sc = ifp->if_softc;
1972 ifr = (struct ifreq *)data;
1973 error = 0;
1974
1975 s = splnet();
1976
1977 switch (command) {
1978 case SIOCSIFMTU:
1979 		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
1980 			error = EINVAL;
1981 		else
			ifp->if_mtu = ifr->ifr_mtu;
1982 break;
1983 case SIOCSIFFLAGS:
1984 if (ifp->if_flags & IFF_UP) {
1985 if (ifp->if_flags & IFF_RUNNING &&
1986 ifp->if_flags & IFF_PROMISC &&
1987 !(sc->vge_if_flags & IFF_PROMISC)) {
1988 CSR_SETBIT_1(sc, VGE_RXCTL,
1989 VGE_RXCTL_RX_PROMISC);
1990 vge_setmulti(sc);
1991 } else if (ifp->if_flags & IFF_RUNNING &&
1992 !(ifp->if_flags & IFF_PROMISC) &&
1993 sc->vge_if_flags & IFF_PROMISC) {
1994 CSR_CLRBIT_1(sc, VGE_RXCTL,
1995 VGE_RXCTL_RX_PROMISC);
1996 vge_setmulti(sc);
1997 } else
1998 vge_init(ifp);
1999 } else {
2000 if (ifp->if_flags & IFF_RUNNING)
2001 vge_stop(sc);
2002 }
2003 sc->vge_if_flags = ifp->if_flags;
2004 break;
2005 case SIOCADDMULTI:
2006 case SIOCDELMULTI:
2007 error = (command == SIOCADDMULTI) ?
2008 ether_addmulti(ifr, &sc->sc_ethercom) :
2009 ether_delmulti(ifr, &sc->sc_ethercom);
2010
2011 if (error == ENETRESET) {
2012 /*
2013 * Multicast list has changed; set the hardware filter
2014 * accordingly.
2015 */
2016 if (ifp->if_flags & IFF_RUNNING)
2017 vge_setmulti(sc);
2018 error = 0;
2019 }
2020 break;
2021 case SIOCGIFMEDIA:
2022 case SIOCSIFMEDIA:
2023 mii = &sc->sc_mii;
2024 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2025 break;
2026 default:
2027 error = ether_ioctl(ifp, command, data);
2028 break;
2029 }
2030
2031 splx(s);
2032 return error;
2033 }
2034
2035 static void
2036 vge_watchdog(struct ifnet *ifp)
2037 {
2038 struct vge_softc *sc;
2039
2040 sc = ifp->if_softc;
2041 VGE_LOCK(sc);
2042 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2043 ifp->if_oerrors++;
2044
2045 vge_txeof(sc);
2046 vge_rxeof(sc);
2047
2048 vge_init(ifp);
2049
2050 VGE_UNLOCK(sc);
2051 }
2052
2053 /*
2054 * Stop the adapter and free any mbufs allocated to the
2055 * RX and TX lists.
2056 */
2057 static void
2058 vge_stop(struct vge_softc *sc)
2059 {
2060 int i;
2061 struct ifnet *ifp;
2062
2063 ifp = &sc->sc_ethercom.ec_if;
2064
2065 VGE_LOCK(sc);
2066 ifp->if_timer = 0;
2067
2068 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2069 #ifdef DEVICE_POLLING
2070 ether_poll_deregister(ifp);
2071 #endif /* DEVICE_POLLING */
2072
2073 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2074 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2075 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2076 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2077 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2078 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2079
2080 if (sc->vge_head != NULL) {
2081 m_freem(sc->vge_head);
2082 sc->vge_head = sc->vge_tail = NULL;
2083 }
2084
2085 /* Free the TX list buffers. */
2086
2087 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2088 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2089 bus_dmamap_unload(sc->vge_dmat,
2090 sc->vge_ldata.vge_tx_dmamap[i]);
2091 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2092 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2093 }
2094 }
2095
2096 /* Free the RX list buffers. */
2097
2098 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2099 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2100 bus_dmamap_unload(sc->vge_dmat,
2101 sc->vge_ldata.vge_rx_dmamap[i]);
2102 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2103 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2104 }
2105 }
2106
2107 VGE_UNLOCK(sc);
2108 }
2109
2110 #if VGE_POWER_MANAGEMENT
2111 /*
2112 * Device suspend routine. Stop the interface and save some PCI
2113 * settings in case the BIOS doesn't restore them properly on
2114 * resume.
2115 */
2116 static int
2117 vge_suspend(struct device *dev)
2118 {
2119 struct vge_softc *sc;
2120 int i;
2121
2122 sc = device_get_softc(dev);
2123
2124 vge_stop(sc);
2125
2126 for (i = 0; i < 5; i++)
2127 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2128 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2129 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2130 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2131 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2132
2133 sc->suspended = 1;
2134
2135 return 0;
2136 }
2137
2138 /*
2139 * Device resume routine. Restore some PCI settings in case the BIOS
2140 * doesn't, re-enable busmastering, and restart the interface if
2141 * appropriate.
2142 */
2143 static int
2144 vge_resume(struct device *dev)
2145 {
2146 struct vge_softc *sc;
2147 struct ifnet *ifp;
2148 int i;
2149
2150 sc = (void *)dev;
2151 ifp = &sc->sc_ethercom.ec_if;
2152
2153 /* better way to do this? */
2154 for (i = 0; i < 5; i++)
2155 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2156 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2157 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2158 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2159 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2160
2161 /* reenable busmastering */
2162 pci_enable_busmaster(dev);
2163 pci_enable_io(dev, SYS_RES_MEMORY);
2164
2165 /* reinitialize interface if necessary */
2166 if (ifp->if_flags & IFF_UP)
2167 		vge_init(ifp);
2168
2169 sc->suspended = 0;
2170
2171 return 0;
2172 }
2173 #endif
2174
2175 /*
2176 * Stop all chip I/O so that the kernel's probe routines don't
2177 * get confused by errant DMAs when rebooting.
2178 */
2179 static void
2180 vge_shutdown(void *arg)
2181 {
2182 struct vge_softc *sc;
2183
2184 sc = arg;
2185 vge_stop(sc);
2186 }
2187