/*	$NetBSD: if_vge.c,v 1.42 2008/11/07 00:20:07 dyoung Exp $	*/

/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.42 2008/11/07 00:20:07 dyoung Exp $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <sys/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>

#define VGE_IFQ_MAXLEN		64

#define VGE_RING_ALIGN		256

#define VGE_NTXDESC		256
#define VGE_NTXDESC_MASK	(VGE_NTXDESC - 1)
#define VGE_NEXT_TXDESC(x)	((x + 1) & VGE_NTXDESC_MASK)
#define VGE_PREV_TXDESC(x)	((x - 1) & VGE_NTXDESC_MASK)

#define VGE_NRXDESC		256	/* Must be a multiple of 4!! */
#define VGE_NRXDESC_MASK	(VGE_NRXDESC - 1)
#define VGE_NEXT_RXDESC(x)	((x + 1) & VGE_NRXDESC_MASK)
#define VGE_PREV_RXDESC(x)	((x - 1) & VGE_NRXDESC_MASK)

#define VGE_ADDR_LO(y)		((uint64_t)(y) & 0xFFFFFFFF)
#define VGE_ADDR_HI(y)		((uint64_t)(y) >> 32)
#define VGE_BUFLEN(y)		((y) & 0x7FFF)
#define ETHER_PAD_LEN		(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VGE_POWER_MANAGEMENT	0	/* disabled for now */

/*
 * Mbuf adjust factor to force 32-bit alignment of IP header.
 * Drivers should pad ETHER_ALIGN bytes when setting up an
 * RX mbuf so the upper layers get the IP header properly aligned
 * past the 14-byte Ethernet header.
 *
 * See also comment in vge_encap().
 */
#define ETHER_ALIGN		2

#ifdef __NO_STRICT_ALIGNMENT
#define VGE_RX_BUFSIZE		MCLBYTES
#else
#define VGE_RX_PAD		sizeof(uint32_t)
#define VGE_RX_BUFSIZE		(MCLBYTES - VGE_RX_PAD)
#endif

/*
 * Control structures are DMA'd to the vge chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct vge_control_data {
	/* TX descriptors */
	struct vge_txdesc	vcd_txdescs[VGE_NTXDESC];
	/* RX descriptors */
	struct vge_rxdesc	vcd_rxdescs[VGE_NRXDESC];
	/* dummy data for TX padding */
	uint8_t			vcd_pad[ETHER_PAD_LEN];
};

#define VGE_CDOFF(x)	offsetof(struct vge_control_data, x)
#define VGE_CDTXOFF(x)	VGE_CDOFF(vcd_txdescs[(x)])
#define VGE_CDRXOFF(x)	VGE_CDOFF(vcd_rxdescs[(x)])
#define VGE_CDPADOFF()	VGE_CDOFF(vcd_pad[0])

/*
 * Software state for TX jobs.
 */
struct vge_txsoft {
	struct mbuf	*txs_mbuf;	/* head of our mbuf chain */
	bus_dmamap_t	txs_dmamap;	/* our DMA map */
};

/*
 * Software state for RX jobs.
 */
struct vge_rxsoft {
	struct mbuf	*rxs_mbuf;	/* head of our mbuf chain */
	bus_dmamap_t	rxs_dmamap;	/* our DMA map */
};

struct vge_softc {
	struct device		sc_dev;

	bus_space_tag_t		sc_bst;		/* bus space tag */
	bus_space_handle_t	sc_bsh;		/* bus space handle */
	bus_dma_tag_t		sc_dmat;

	struct ethercom		sc_ethercom;	/* interface info */
	uint8_t			sc_eaddr[ETHER_ADDR_LEN];

	void			*sc_intrhand;
	struct mii_data		sc_mii;
	uint8_t			sc_type;
	int			sc_if_flags;
	int			sc_link;
	int			sc_camidx;
	callout_t		sc_timeout;

	bus_dmamap_t		sc_cddmamap;
#define sc_cddma		sc_cddmamap->dm_segs[0].ds_addr

	struct vge_txsoft	sc_txsoft[VGE_NTXDESC];
	struct vge_rxsoft	sc_rxsoft[VGE_NRXDESC];
	struct vge_control_data	*sc_control_data;
#define sc_txdescs		sc_control_data->vcd_txdescs
#define sc_rxdescs		sc_control_data->vcd_rxdescs

	int			sc_tx_prodidx;
	int			sc_tx_considx;
	int			sc_tx_free;

	struct mbuf		*sc_rx_mhead;
	struct mbuf		*sc_rx_mtail;
	int			sc_rx_prodidx;
	int			sc_rx_consumed;

	int			sc_suspended;	/* 0 = normal  1 = suspended */
	uint32_t		sc_saved_maps[5];	/* pci data */
	uint32_t		sc_saved_biosaddr;
	uint8_t			sc_saved_intline;
	uint8_t			sc_saved_cachelnsz;
	uint8_t			sc_saved_lattimer;
};

#define VGE_CDTXADDR(sc, x)	((sc)->sc_cddma + VGE_CDTXOFF(x))
#define VGE_CDRXADDR(sc, x)	((sc)->sc_cddma + VGE_CDRXOFF(x))
#define VGE_CDPADADDR(sc)	((sc)->sc_cddma + VGE_CDPADOFF())

#define VGE_TXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap,		\
	    VGE_CDTXOFF(idx),						\
	    offsetof(struct vge_txdesc, td_frag[0]),			\
	    (ops))
#define VGE_TXFRAGSYNC(sc, idx, nsegs, ops)				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    VGE_CDTXOFF(idx) +						\
	    offsetof(struct vge_txdesc, td_frag[0]),			\
	    sizeof(struct vge_txfrag) * (nsegs),			\
	    (ops))
#define VGE_RXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    VGE_CDRXOFF(idx),						\
	    sizeof(struct vge_rxdesc),					\
	    (ops))

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg))

#define CSR_SETBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
#define CSR_SETBIT_2(sc, reg, x)	\
	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x))
#define CSR_SETBIT_4(sc, reg, x)	\
	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x))

#define CSR_CLRBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))
#define CSR_CLRBIT_2(sc, reg, x)	\
	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x))
#define CSR_CLRBIT_4(sc, reg, x)	\
	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x))

#define VGE_TIMEOUT		10000

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t);
static inline void vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t);

static int vge_ifflags_cb(struct ethercom *);

static int vge_match(struct device *, struct cfdata *, void *);
static void vge_attach(struct device *, struct device *, void *);

static int vge_encap(struct vge_softc *, struct mbuf *, int);

static int vge_allocmem(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
#ifndef __NO_STRICT_ALIGNMENT
static inline void vge_fixup_rx(struct mbuf *);
#endif
static void vge_rxeof(struct vge_softc *);
static void vge_txeof(struct vge_softc *);
static int vge_intr(void *);
static void vge_tick(void *);
static void vge_start(struct ifnet *);
static int vge_ioctl(struct ifnet *, u_long, void *);
static int vge_init(struct ifnet *);
static void vge_stop(struct vge_softc *);
static void vge_watchdog(struct ifnet *);
#if VGE_POWER_MANAGEMENT
static int vge_suspend(struct device *);
static int vge_resume(struct device *);
#endif
static void vge_shutdown(void *);

static uint16_t vge_read_eeprom(struct vge_softc *, int);

static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_miibus_readreg(struct device *, int, int);
static void vge_miibus_writereg(struct device *, int, int, int);
static void vge_miibus_statchg(struct device *);

static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_setmulti(struct vge_softc *);
static void vge_reset(struct vge_softc *);

CFATTACH_DECL(vge, sizeof(struct vge_softc),
    vge_match, vge_attach, NULL, NULL);

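/*
 * Load a bus address into a TX fragment / RX descriptor. The chip uses
 * a 48-bit DMA address split into a 32-bit low word and a 16-bit high
 * word; on 32-bit platforms the high word is simply zero.
 */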
static inline void
vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr)
{

	f->tf_addrlo = htole32((uint32_t)daddr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
	else
		f->tf_addrhi = 0;
}

static inline void
vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr)
{

	rxd->rd_addrlo = htole32((uint32_t)daddr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
	else
		rxd->rd_addrhi = 0;
}

/*
 * Defragment mbuf chain contents to be as linear as possible.
 * Returns new mbuf chain on success, NULL on failure. Old mbuf
 * chain is always freed.
 * XXX temporary until there is a generic function doing this.
 */
#define m_defrag	vge_m_defrag
struct mbuf * vge_m_defrag(struct mbuf *, int);

struct mbuf *
vge_m_defrag(struct mbuf *mold, int flags)
{
	struct mbuf *m0, *mn, *n;
	size_t sz = mold->m_pkthdr.len;

#ifdef DIAGNOSTIC
	if ((mold->m_flags & M_PKTHDR) == 0)
		panic("m_defrag: not a mbuf chain header");
#endif

	MGETHDR(m0, flags, MT_DATA);
	if (m0 == NULL)
		return NULL;
	m0->m_pkthdr.len = mold->m_pkthdr.len;
	mn = m0;

	do {
		if (sz > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(m0);
				return NULL;
			}
		}

		mn->m_len = MIN(sz, MCLBYTES);

		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
		    mtod(mn, void *));

		sz -= mn->m_len;

		if (sz > 0) {
			/* need more mbufs */
			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL) {
				m_freem(m0);
				return NULL;
			}

			mn->m_next = n;
			mn = n;
		}
	} while (sz > 0);

	return m0;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static uint16_t
vge_read_eeprom(struct vge_softc *sc, int addr)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "EEPROM read timed out\n");
		return 0;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	return word;
}

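/*
 * Disable MII autopolling and wait for the poll engine to go idle,
 * so that the MII registers can be accessed directly.
 */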
static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "failed to idle MII autopoll\n");
	}
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "failed to start MII autopoll\n");
	}
}

static int
vge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i, s;
	uint16_t rval;

	sc = (void *)dev;
	rval = 0;
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return 0;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		aprint_error_dev(&sc->sc_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	splx(s);

	return rval;
}

static void
vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, s;

	sc = (void *)dev;
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	s = splnet();
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "MII write timed out\n");
	}

	vge_miipoll_start(sc);
	splx(s);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->sc_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error;

	error = 0;

	if (sc->sc_camidx == VGE_CAM_MAXADDRS)
		return ENOSPC;

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8),
	    1 << (sc->sc_camidx & 7));

	sc->sc_camidx++;

 fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return error;
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp;
	int error;
	uint32_t h, hashes[2] = { 0, 0 };
	struct ether_multi *enm;
	struct ether_multistep step;

	error = 0;
	ifp = &sc->sc_ethercom.ec_if;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/* Now program new ones */
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		/*
		 * If multicast range, fall back to ALLMULTI.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		error = vge_cam_set(sc, enm->enm_addrlo);
		if (error)
			break;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			/*
			 * If multicast range, fall back to ALLMULTI.
			 */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto allmulti;

			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;
			hashes[h >> 5] |= 1 << (h & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}

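/*
 * Soft-reset the chip, then force a reload of the station
 * configuration from the EEPROM (the EECSR RELOAD bit self-clears
 * when the reload completes).
 */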
static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
772 aprint_error_dev(&sc->sc_dev, "soft reset timed out");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		aprint_error_dev(&sc->sc_dev, "EEPROM reload timed out\n");
		return;
	}

	/*
	 * On some machines, the first data read from the EEPROM can be
	 * corrupted, so do a dummy read here first to avoid the mess.
	 */
	(void)vge_read_eeprom(sc, 0);

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
		return 1;

	return 0;
}

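/*
 * Allocate and map the DMA control data (the TX/RX descriptor rings
 * plus the TX pad buffer) in one contiguous segment, then create the
 * DMA maps for the individual TX and RX buffers.
 */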
static int
vge_allocmem(struct vge_softc *sc)
{
	int error;
	int nseg;
	int i;
	bus_dma_segment_t seg;

	/*
	 * Allocate memory for control data.
	 */

	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data),
	    VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "could not allocate control data dma memory\n");
		goto fail_1;
	}

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct vge_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "could not map control data dma memory\n");
		goto fail_2;
	}
	memset(sc->sc_control_data, 0, sizeof(struct vge_control_data));

	/*
	 * Create map for control data.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct vge_control_data), 1,
	    sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "could not create control data dmamap\n");
		goto fail_3;
	}

	/* Load the map for the control data. */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct vge_control_data), NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "could not load control data dma memory\n");
		goto fail_4;
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_NTXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN,
		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);
		if (error) {
			aprint_error_dev(&sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_5;
		}
	}

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_NRXDESC; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(&sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	return 0;

 fail_6:
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < VGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct vge_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_1:
	return ENOMEM;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vge_attach(struct device *parent, struct device *self, void *aux)
{
	uint8_t *eaddr;
	struct vge_softc *sc = (void *)self;
	struct ifnet *ifp;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr;
	pci_intr_handle_t ih;
	uint16_t val;

	aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
	    PCI_REVISION(pa->pa_class));

	/* Make sure bus-mastering is enabled */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) {
		aprint_error_dev(&sc->sc_dev, "couldn't map memory\n");
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
	if (sc->sc_intrhand == NULL) {
		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	eaddr = sc->sc_eaddr;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	aprint_normal_dev(&sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(eaddr));

	/*
	 * Use the 32bit tag. Hardware supports 48bit physical addresses,
	 * but we don't use that for now.
	 */
	sc->sc_dmat = pa->pa_dmat;

	if (vge_allocmem(sc) != 0)
		return;

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb);

	callout_init(&sc->sc_timeout, 0);
	callout_setfunc(&sc->sc_timeout, vge_tick, sc);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
		aprint_error_dev(&sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
	}
}

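/*
 * Attach an mbuf cluster to RX descriptor 'idx'. If 'm' is NULL a
 * fresh cluster is allocated; otherwise the given mbuf is recycled.
 * Per the chip's replenish rule (see the note below), the OWN bits
 * are handed back to the hardware in groups of four descriptors.
 */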
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *m_new;
	struct vge_rxdesc *rxd;
	struct vge_rxsoft *rxs;
	bus_dmamap_t map;
	int i;
#ifdef DIAGNOSTIC
	uint32_t rd_sts;
#endif

	m_new = NULL;
	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);
			return ENOBUFS;
		}

		m = m_new;
	} else
		m->m_data = m->m_ext.ext_buf;

	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE;
#ifndef __NO_STRICT_ALIGNMENT
	m->m_data += VGE_RX_PAD;
#endif
	rxs = &sc->sc_rxsoft[idx];
	map = rxs->rxs_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
		goto out;

	rxd = &sc->sc_rxdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	rd_sts = le32toh(rxd->rd_sts);
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (rd_sts & VGE_RDSTS_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(&sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	rxd->rd_buflen =
	    htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
	vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
	rxd->rd_sts = 0;
	rxd->rd_ctl = 0;
	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->sc_rx_consumed++;
	if (sc->sc_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - VGE_RXCHUNK; i--) {
			KASSERT(i >= 0);
			sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
			VGE_RXDESCSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
		sc->sc_rx_consumed = 0;
	}

	return 0;
 out:
	if (m_new != NULL)
		m_freem(m_new);
	return ENOMEM;
}

#ifndef __NO_STRICT_ALIGNMENT
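/*
 * Copy a received frame back two bytes in its buffer, so that the IP
 * header following the 14-byte Ethernet header ends up 32-bit aligned.
 * See the comment in vge_newbuf() about the RX padding trick.
 */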
static inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int idx, total_len, lim;
	struct vge_rxdesc *cur_rxd;
	struct vge_rxsoft *rxs;
	uint32_t rxstat, rxctl;

	ifp = &sc->sc_ethercom.ec_if;
	lim = 0;

	/* Invalidate the descriptor memory */

	for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) {
		cur_rxd = &sc->sc_rxdescs[idx];

		VGE_RXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rxd->rd_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0) {
			VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			break;
		}

		rxctl = le32toh(cur_rxd->rd_ctl);
		rxs = &sc->sc_rxsoft[idx];
		m = rxs->rxs_mbuf;
		total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
		    0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = VGE_RX_BUFSIZE;
			if (sc->sc_rx_mhead == NULL)
				sc->sc_rx_mhead = sc->sc_rx_mtail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
				sc->sc_rx_mtail = m;
			}
			vge_newbuf(sc, idx, NULL);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & VGE_RDSTS_VIDM) == 0 &&
		    (rxstat & VGE_RDSTS_CSUMERR) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, idx, NULL)) {
			ifp->if_ierrors++;
			if (sc->sc_rx_mhead != NULL) {
				m_freem(sc->sc_rx_mhead);
				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			}
			vge_newbuf(sc, idx, m);
			continue;
		}

		if (sc->sc_rx_mhead != NULL) {
			m->m_len = total_len % VGE_RX_BUFSIZE;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->sc_rx_mtail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->sc_rx_mtail->m_next = m;
			}
			m = sc->sc_rx_mhead;
			sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			/* Check TCP checksum */
			if (rxctl & VGE_RDCTL_TCPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
			/* Check UDP checksum */
			if (rxctl & VGE_RDCTL_UDPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			/*
			 * We use bswap16() here because:
			 * On LE machines, tag is stored in BE as stream data.
			 * On BE machines, tag is stored in BE as stream data
			 * but it was already swapped by le32toh() above.
			 */
			VLAN_INPUT_TAG(ifp, m,
			    bswap16(rxctl & VGE_RDCTL_VLANID), continue);
		}

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		(*ifp->if_input)(ifp, m);

		lim++;
		if (lim == VGE_NRXDESC)
			break;
	}

	sc->sc_rx_prodidx = idx;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

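/*
 * Transmit completion handler: walk the ring from the consumer index,
 * reclaim completed descriptors and their mbufs, and update the error
 * and collision counters.
 */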
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_txsoft *txs;
	uint32_t txstat;
	int idx;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx;
	    sc->sc_tx_free < VGE_NTXDESC;
	    idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) {
		VGE_TXDESCSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat = le32toh(sc->sc_txdescs[idx].td_sts);
		VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
		if (txstat & VGE_TDSTS_OWN) {
			break;
		}

		txs = &sc->sc_txsoft[idx];
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
		    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->sc_tx_considx = idx;

	if (sc->sc_tx_free > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->sc_tx_free < VGE_NTXDESC)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	else
		ifp->if_timer = 0;
}

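/*
 * One-second timer: drive the MII tick and track link state. When
 * the link comes back up, restart any pending transmission.
 */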
static void
vge_tick(void *xsc)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;
	int s;

	sc = xsc;
	ifp = &sc->sc_ethercom.ec_if;
	mii = &sc->sc_mii;

	s = splnet();

	callout_schedule(&sc->sc_timeout, hz);

	mii_tick(mii);
	if (sc->sc_link) {
		if ((mii->mii_media_status & IFM_ACTIVE) == 0)
			sc->sc_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->sc_link = 1;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				vge_start(ifp);
		}
	}

	splx(s);
}

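/*
 * Interrupt handler. Interrupts are masked for the duration; the ISR
 * is read and acknowledged in a loop, and the RX/TX handlers are run
 * as indicated by the status bits.
 */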
static int
vge_intr(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int claim;

	sc = arg;
	claim = 0;
	if (sc->sc_suspended) {
		return claim;
	}

	ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_UP) == 0) {
		return claim;
	}

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			claim = 1;
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(ifp);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (claim && !IFQ_IS_EMPTY(&ifp->if_snd))
		vge_start(ifp);

	return claim;
}

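/*
 * Map a packet for DMA and fill in TX descriptor 'idx'. Chains with
 * too many fragments are coalesced with m_defrag(), short frames are
 * padded out of the shared pad buffer, and the OWN bit is written in
 * a second pass so the chip never sees a half-built descriptor.
 */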
static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_txsoft *txs;
	struct vge_txdesc *txd;
	struct vge_txfrag *f;
	struct mbuf *m_new;
	bus_dmamap_t map;
	int m_csumflags, seg, error, flags;
	struct m_tag *mtag;
	size_t sz;
	uint32_t td_sts, td_ctl;

	KASSERT(sc->sc_tx_free > 0);

	txd = &sc->sc_txdescs[idx];

#ifdef DIAGNOSTIC
	/* If this descriptor is still owned by the chip, bail. */
	VGE_TXDESCSYNC(sc, idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	td_sts = le32toh(txd->td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (td_sts & VGE_TDSTS_OWN) {
		return ENOBUFS;
	}
#endif

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

	txs = &sc->sc_txsoft[idx];
	map = txs->txs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT);

	/* If too many segments to map, coalesce */
	if (error == EFBIG ||
	    (m_head->m_pkthdr.len < ETHER_PAD_LEN &&
	     map->dm_nsegs == VGE_TX_FRAGS)) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return EFBIG;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
		    m_new, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_new);
			return error;
		}

		m_head = m_new;
	} else if (error)
		return error;

	txs->txs_mbuf = m_head;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) {
		f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
		vge_set_txaddr(f, map->dm_segs[seg].ds_addr);
	}

	/* Argh. This chip does not autopad short frames */
	sz = m_head->m_pkthdr.len;
	if (sz < ETHER_PAD_LEN) {
		f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz));
		vge_set_txaddr(f, VGE_CDPADADDR(sc));
		sz = ETHER_PAD_LEN;
		seg++;
	}
	VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	seg++;

	flags = 0;
	if (m_csumflags & M_CSUM_IPv4)
		flags |= VGE_TDCTL_IPCSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		flags |= VGE_TDCTL_TCPCSUM;
	if (m_csumflags & M_CSUM_UDPv4)
		flags |= VGE_TDCTL_UDPCSUM;
	td_sts = sz << 16;
	td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM;

	if (sz > ETHERMTU + ETHER_HDR_LEN)
		td_ctl |= VGE_TDCTL_JUMBO;

	/*
	 * Set up hardware VLAN tagging.
	 */
	mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
	if (mtag != NULL) {
		/*
		 * No need htons() here since vge(4) chip assumes
		 * that tags are written in little endian and
		 * we already use htole32() here.
		 */
		td_ctl |= VLAN_TAG_VALUE(mtag) | VGE_TDCTL_VTAG;
	}
	txd->td_ctl = htole32(td_ctl);
	txd->td_sts = htole32(td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts);
	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_tx_free--;

	return 0;
}

/*
 * Main transmit routine.
 */

static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct vge_txsoft *txs;
	struct mbuf *m_head;
	int idx, pidx, ofree, error;

	sc = ifp->if_softc;

	if (!sc->sc_link ||
	    (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		return;
	}

	m_head = NULL;
	idx = sc->sc_tx_prodidx;
	pidx = VGE_PREV_TXDESC(idx);
	ofree = sc->sc_tx_free;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sc->sc_tx_free == 0) {
			/*
			 * All slots used, stop for now.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = &sc->sc_txsoft[idx];
		KASSERT(txs->txs_mbuf == NULL);

		if ((error = vge_encap(sc, m_head, idx))) {
			if (error == EFBIG) {
				aprint_error_dev(&sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m_head);
				m_freem(m_head);
				continue;
			}

			/*
			 * Short on resources, just stop for now.
			 */
			if (error == ENOBUFS)
				ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		sc->sc_txdescs[pidx].td_frag[0].tf_buflen |=
		    htole16(VGE_TXDESC_Q);
		VGE_TXFRAGSYNC(sc, pidx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (txs->txs_mbuf != m_head) {
			m_freem(m_head);
			m_head = txs->txs_mbuf;
		}

		pidx = idx;
		idx = VGE_NEXT_TXDESC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (sc->sc_tx_free < ofree) {
		/* TX packet queued */

		sc->sc_tx_prodidx = idx;

		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

		/*
		 * Use the countdown timer for interrupt moderation.
		 * 'TX done' interrupts are disabled. Instead, we reset the
		 * countdown timer, which will begin counting until it hits
		 * the value in the SSTIMER register, and then trigger an
		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
		 * timer count is reloaded. Only when the transmitter
		 * is idle will the timer hit 0 and an interrupt fire.
		 */
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

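/*
 * Initialize the hardware: stop and reset the chip, set up the
 * descriptor rings, program the station address and receive filter,
 * and start the MAC with interrupts enabled.
 */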
static int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int i, rc = 0;

	sc = ifp->if_softc;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/* Initialize the RX descriptors and mbufs. */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	sc->sc_rx_consumed = 0;
	for (i = 0; i < VGE_NRXDESC; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to allocate or map rx buffer\n");
			return 1; /* XXX */
		}
	}
	sc->sc_rx_prodidx = 0;
	sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;

	/* Initialize the TX descriptors and mbufs. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    VGE_CDTXOFF(0), sizeof(sc->sc_txdescs),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	for (i = 0; i < VGE_NTXDESC; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;

	sc->sc_tx_prodidx = 0;
	sc->sc_tx_considx = 0;
	sc->sc_tx_free = VGE_NTXDESC;

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0)));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0)));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_if_flags = 0;
	sc->sc_link = 0;

	callout_schedule(&sc->sc_timeout, hz);

 out:
	return rc;
}

static void
vge_miibus_statchg(struct device *self)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = (void *)self;
	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;
	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		aprint_error_dev(&sc->sc_dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

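/*
 * Interface-flags change callback. Only a bare IFF_PROMISC toggle is
 * handled here; any other flag change forces a full reinitialization
 * by returning ENETRESET.
 */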
static int
vge_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct vge_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) == 0)
		return 0;

	if ((ifp->if_flags & IFF_PROMISC) == 0)
		CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	else
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	vge_setmulti(sc);
	return 0;
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct vge_softc *sc;
	struct ifreq *ifr;
	int s, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) {
		error = 0;
		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vge_setmulti(sc);
		}
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);
	return error;
}

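/*
 * Watchdog: the transmitter has been quiet for too long. Log the
 * timeout, reap anything that did complete, and reinitialize.
 */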
static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	aprint_error_dev(&sc->sc_dev, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_txsoft *txs;
	struct vge_rxsoft *rxs;
	int i, s;

	ifp = &sc->sc_ethercom.ec_if;

	s = splnet();
	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->sc_rx_mhead != NULL) {
		m_freem(sc->sc_rx_mhead);
		sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_NTXDESC; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}

	splx(s);
}

#if VGE_POWER_MANAGEMENT
/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(struct device *dev)
{
	struct vge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	vge_stop(sc);

	for (i = 0; i < 5; i++)
		sc->sc_saved_maps[i] =
		    pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

2152 sc->suspended = 1;

	return 0;
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(struct device *dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = (void *)dev;
	ifp = &sc->sc_ethercom.ec_if;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4,
		    sc->sc_saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(ifp);

	sc->sc_suspended = 0;

	return 0;
}
#endif

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(void *arg)
{
	struct vge_softc *sc;

	sc = arg;
	vge_stop(sc);
}